diff --git a/.arcconfig b/.arcconfig
new file mode 100644
index 000000000000..b9c39321eda3
--- /dev/null
+++ b/.arcconfig
@@ -0,0 +1,5 @@
+{
+ "project.name" : "ghc",
+ "repository.callsign" : "GHC",
+ "phabricator.uri" : "https://phabricator.haskell.org"
+}
diff --git a/.arclint b/.arclint
new file mode 100644
index 000000000000..f7980155fbff
--- /dev/null
+++ b/.arclint
@@ -0,0 +1,86 @@
+{
+ "linters": {
+ "filename": {
+ "type": "filename"
+ },
+ "generated": {
+ "type": "generated"
+ },
+ "merge-conflict": {
+ "type": "merge-conflict"
+ },
+ "nolint": {
+ "type": "nolint"
+ },
+ "haskell": {
+ "type": "text",
+ "include": ["(\\.(l?hs(-boot)?|x|y\\.pp)(\\.in)?$)"],
+ "severity": {
+ "5": "disabled"
+ }
+ },
+ "c": {
+ "type": "text",
+ "include": ["(\\.(c|h)(\\.in)?$)"]
+ },
+ "text-xml": {
+ "type": "text",
+ "include": "(\\.xml$)",
+ "severity": {
+ "5": "disabled",
+ "3": "disabled"
+ }
+ },
+ "shell": {
+ "type": "text",
+ "include": [ "(\\.sh$)" ],
+ "text.max-line-length": 200
+ },
+ "makefiles": {
+ "type": "text",
+ "include": [ "(Makefile$)", "(\\.mk$)" ],
+ "text.max-line-length": 200,
+ "severity": {
+ "2": "disabled"
+ }
+ }
+ },
+
+ "exclude":
+ [ "(^libffi-tarballs)",
+ "(^libraries/binary)",
+ "(^libraries/bytestring)",
+ "(^libraries/Cabal)",
+ "(^libraries/containers)",
+ "(^libraries/haskeline)",
+ "(^libraries/pretty)",
+ "(^libraries/terminfo)",
+ "(^libraries/transformers)",
+ "(^libraries/xhtml)",
+ "(^libraries/Win32)",
+ "(^libraries/primitive)",
+ "(^libraries/vector)",
+ "(^libraries/time)",
+ "(^libraries/random)",
+ "(^libraries/array)",
+ "(^libraries/deepseq)",
+ "(^libraries/directory)",
+ "(^libraries/filepath)",
+ "(^libraries/haskell98)",
+ "(^libraries/haskell2010)",
+ "(^libraries/hoopl)",
+ "(^libraries/hpc)",
+ "(^libraries/old-locale)",
+ "(^libraries/old-time)",
+ "(^libraries/process)",
+ "(^libraries/unix)",
+ "(^libraries/parallel)",
+ "(^libraries/stm)",
+ "(^libraries/dph)",
+ "(^utils/haddock)",
+ "(^nofib)",
+ "(^utils/hsc2hs)",
+ "(^libffi-tarballs)",
+ "(^ghc-tarballs)"
+ ]
+}
diff --git a/.gitignore b/.gitignore
index 57774d1ac27f..99bf3a6bab7c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,33 +46,6 @@ _darcs/
# sub-repositories
/ghc-tarballs/
-/libffi-tarballs/
-/libraries/array/
-/libraries/base/
-/libraries/deepseq/
-/libraries/directory/
-/libraries/dph/
-/libraries/extensible-exceptions/
-/libraries/filepath/
-/libraries/ghc-prim/
-/libraries/haskell2010/
-/libraries/haskell98/
-/libraries/hoopl/
-/libraries/hpc/
-/libraries/integer-gmp/
-/libraries/integer-simple/
-/libraries/mtl/
-/libraries/old-locale/
-/libraries/old-time/
-/libraries/parallel/
-/libraries/process/
-/libraries/stm/
-/libraries/template-haskell/
-/libraries/unix/
-/libraries/utf8-string/
-/nofib/
-/utils/haddock/
-/utils/hsc2hs/
# -----------------------------------------------------------------------------
# Cabal dist directories
@@ -173,5 +146,7 @@ _darcs/
/utils/runghc/runghc.cabal
/extra-gcc-opts
+/sdistprep
.tm_properties
+VERSION
diff --git a/.gitmodules b/.gitmodules
index f0fd2807abc1..66f4f37d6d9f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,56 +1,133 @@
[submodule "libraries/binary"]
path = libraries/binary
- url = http://git.haskell.org/packages/binary.git
+ url = ../packages/binary.git
ignore = untracked
[submodule "libraries/bytestring"]
path = libraries/bytestring
- url = http://git.haskell.org/packages/bytestring.git
+ url = ../packages/bytestring.git
ignore = untracked
[submodule "libraries/Cabal"]
path = libraries/Cabal
- url = http://git.haskell.org/packages/Cabal.git
+ url = ../packages/Cabal.git
ignore = untracked
[submodule "libraries/containers"]
path = libraries/containers
- url = http://git.haskell.org/packages/containers.git
+ url = ../packages/containers.git
ignore = untracked
[submodule "libraries/haskeline"]
path = libraries/haskeline
- url = http://git.haskell.org/packages/haskeline.git
+ url = ../packages/haskeline.git
ignore = untracked
[submodule "libraries/pretty"]
path = libraries/pretty
- url = http://git.haskell.org/packages/pretty.git
+ url = ../packages/pretty.git
ignore = untracked
[submodule "libraries/terminfo"]
path = libraries/terminfo
- url = http://git.haskell.org/packages/terminfo.git
+ url = ../packages/terminfo.git
ignore = untracked
[submodule "libraries/transformers"]
path = libraries/transformers
- url = http://git.haskell.org/packages/transformers.git
+ url = ../packages/transformers.git
ignore = untracked
[submodule "libraries/xhtml"]
path = libraries/xhtml
- url = http://git.haskell.org/packages/xhtml.git
+ url = ../packages/xhtml.git
ignore = untracked
[submodule "libraries/Win32"]
path = libraries/Win32
- url = http://git.haskell.org/packages/Win32.git
+ url = ../packages/Win32.git
ignore = untracked
[submodule "libraries/primitive"]
path = libraries/primitive
- url = http://git.haskell.org/packages/primitive.git
+ url = ../packages/primitive.git
ignore = untracked
[submodule "libraries/vector"]
path = libraries/vector
- url = http://git.haskell.org/packages/vector.git
+ url = ../packages/vector.git
ignore = untracked
[submodule "libraries/time"]
path = libraries/time
- url = http://git.haskell.org/packages/time.git
+ url = ../packages/time.git
ignore = untracked
[submodule "libraries/random"]
path = libraries/random
- url = http://git.haskell.org/packages/random.git
+ url = ../packages/random.git
ignore = untracked
+[submodule "libraries/array"]
+ path = libraries/array
+ url = ../packages/array.git
+ ignore = none
+[submodule "libraries/deepseq"]
+ path = libraries/deepseq
+ url = ../packages/deepseq.git
+ ignore = none
+[submodule "libraries/directory"]
+ path = libraries/directory
+ url = ../packages/directory.git
+ ignore = none
+[submodule "libraries/filepath"]
+ path = libraries/filepath
+ url = ../packages/filepath.git
+ ignore = none
+[submodule "libraries/haskell98"]
+ path = libraries/haskell98
+ url = ../packages/haskell98.git
+ ignore = none
+[submodule "libraries/haskell2010"]
+ path = libraries/haskell2010
+ url = ../packages/haskell2010.git
+ ignore = none
+[submodule "libraries/hoopl"]
+ path = libraries/hoopl
+ url = ../packages/hoopl.git
+ ignore = none
+[submodule "libraries/hpc"]
+ path = libraries/hpc
+ url = ../packages/hpc.git
+ ignore = none
+[submodule "libraries/old-locale"]
+ path = libraries/old-locale
+ url = ../packages/old-locale.git
+ ignore = none
+[submodule "libraries/old-time"]
+ path = libraries/old-time
+ url = ../packages/old-time.git
+ ignore = none
+[submodule "libraries/process"]
+ path = libraries/process
+ url = ../packages/process.git
+ ignore = none
+[submodule "libraries/unix"]
+ path = libraries/unix
+ url = ../packages/unix.git
+ ignore = none
+[submodule "libraries/parallel"]
+ path = libraries/parallel
+ url = ../packages/parallel.git
+ ignore = none
+[submodule "libraries/stm"]
+ path = libraries/stm
+ url = ../packages/stm.git
+ ignore = none
+[submodule "libraries/dph"]
+ path = libraries/dph
+ url = ../packages/dph.git
+ ignore = none
+[submodule "utils/haddock"]
+ path = utils/haddock
+ url = ../haddock.git
+ ignore = none
+ branch = ghc-head
+[submodule "nofib"]
+ path = nofib
+ url = ../nofib.git
+ ignore = none
+[submodule "utils/hsc2hs"]
+ path = utils/hsc2hs
+ url = ../hsc2hs.git
+ ignore = none
+[submodule "libffi-tarballs"]
+ path = libffi-tarballs
+ url = ../libffi-tarballs.git
+ ignore = none
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 000000000000..aaf7dd776eda
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,36 @@
+git:
+ submodules: false
+
+notifications:
+ email:
+ - mail@joachim-breitner.de
+ - ghc-builds@haskell.org
+
+env:
+ - DEBUG_STAGE2=YES
+ - DEBUG_STAGE2=NO
+
+before_install:
+ - git config --global url."git://github.com/ghc/packages-".insteadOf git://github.com/ghc/packages/
+ - git config --global url."http://github.com/ghc/packages-".insteadOf http://github.com/ghc/packages/
+ - git config --global url."https://github.com/ghc/packages-".insteadOf https://github.com/ghc/packages/
+ - git config --global url."ssh://git@github.com/ghc/packages-".insteadOf ssh://git@github.com/ghc/packages/
+ - git config --global url."git@github.com:/ghc/packages-".insteadOf git@github.com:/ghc/packages/
+ - git submodule update --init --recursive
+install:
+ - sudo apt-get update
+ - sudo apt-get install haskell-platform autoconf libtool make ncurses-dev g++ dblatex docbook-xsl docbook-utils
+ - cabal update
+ - cabal install happy alex
+script:
+ - echo 'V = 0' >> mk/validate.mk # otherwise we hit log file limits on travis.
+ # do not build docs
+ - echo 'HADDOCK_DOCS = NO' >> mk/validate.mk
+ - echo 'BUILD_DOCBOOK_HTML = NO' >> mk/validate.mk
+ - echo 'BUILD_DOCBOOK_PS = NO' >> mk/validate.mk
+ - echo 'BUILD_DOCBOOK_PDF = NO' >> mk/validate.mk
+ # do not build dynamic libraries
+ - echo 'DYNAMIC_GHC_PROGRAMS = NO' >> mk/validate.mk
+ - echo 'GhcLibWays = v' >> mk/validate.mk
+ - if [ "$DEBUG_STAGE2" = "YES" ]; then echo 'GhcStage2HcOpts += -DDEBUG' >> mk/validate.mk; fi
+ - CPUS=2 SKIP_PERF_TESTS=YES PATH=~/.cabal/bin:$PATH ./validate --fast
diff --git a/Makefile b/Makefile
index ce6735979205..c4cce6d4056d 100644
--- a/Makefile
+++ b/Makefile
@@ -30,10 +30,10 @@ default : all
# For help, type 'make help'
.PHONY: help
-help :
+help:
@cat MAKEHELP
-ifneq "$(findstring clean,$(MAKECMDGOALS))" ""
+ifneq "$(filter maintainer-clean distclean clean help,$(MAKECMDGOALS))" ""
-include mk/config.mk
else
include mk/config.mk
@@ -72,7 +72,7 @@ endif
$(MAKE) -r --no-print-directory -f ghc.mk phase=final $@
binary-dist: binary-dist-prep
- mv bindistprep/*.tar.bz2 .
+ mv bindistprep/*.tar.$(TAR_COMP_EXT) .
binary-dist-prep:
ifeq "$(mingw32_TARGET_OS)" "1"
diff --git a/README.md b/README.md
index c9c38f1960f7..f35df7256b74 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,8 @@
The Glasgow Haskell Compiler
============================
+[![Build Status](https://travis-ci.org/ghc/ghc.svg?branch=master)](http://travis-ci.org/ghc/ghc)
+
This is the source tree for [GHC][1], a compiler and interactive
environment for the Haskell functional programming language.
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 000000000000..9f6f1a0ab6c7
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,50 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+MACHINES =
+ { "ubuntu1204-i386" =>
+ { :box => "chef/ubuntu-12.04-i386",
+ :provision => "utils/vagrant/bootstrap-deb.sh"
+ },
+ "ubuntu1204-amd64" =>
+ { :box => "chef/ubuntu-12.04",
+ :provision => "utils/vagrant/bootstrap-deb.sh"
+ },
+ "centos65-i386" =>
+ { :box => "chef/centos-6.5-i386",
+ :provision => "utils/vagrant/bootstrap-rhel.sh"
+ },
+ "centos65-amd64" =>
+ { :box => "chef/centos-6.5",
+ :provision => "utils/vagrant/bootstrap-rhel.sh"
+ },
+ "debian74-i386" =>
+ { :box => "chef/debian-7.4-i386",
+ :provision => "utils/vagrant/bootstrap-deb.sh"
+ },
+ "debian74-amd64" =>
+ { :box => "chef/debian-7.4",
+ :provision => "utils/vagrant/bootstrap-deb.sh"
+ }
+ }
+
+VAGRANTFILE_API_VERSION = "2"
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ MACHINES.each_pair do |name, opts|
+ config.vm.define name do |c|
+ c.vm.box = opts[:box]
+ c.vm.network "public_network"
+ c.vm.provision :shell, :path => opts[:provision]
+ c.vm.provider "virtualbox" do |vb|
+ vb.gui = false; vb.memory = 4096; vb.cpus = 2
+ vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
+ end
+ c.vm.provider "vmware_workstation" do |vb|
+ vb.gui = false; vb.vmx["memsize"] = "4096"; vb.vmx["numvcpus"] = "2"
+ end
+ c.vm.provider "vmware_fusion" do |vb|
+ vb.gui = false; vb.vmx["memsize"] = "4096"; vb.vmx["numvcpus"] = "2"
+ end
+ end
+ end
+end
diff --git a/aclocal.m4 b/aclocal.m4
index a30fa4fa67ee..d3a32b80c2d0 100644
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -197,6 +197,9 @@ AC_DEFUN([FPTOOLS_SET_HASKELL_PLATFORM_VARS],
GET_ARM_ISA()
test -z "[$]2" || eval "[$]2=\"ArchARM {armISA = \$ARM_ISA, armISAExt = \$ARM_ISA_EXT, armABI = \$ARM_ABI}\""
;;
+ aarch64)
+ test -z "[$]2" || eval "[$]2=ArchARM64"
+ ;;
alpha)
test -z "[$]2" || eval "[$]2=ArchAlpha"
;;
@@ -206,7 +209,7 @@ AC_DEFUN([FPTOOLS_SET_HASKELL_PLATFORM_VARS],
mipsel)
test -z "[$]2" || eval "[$]2=ArchMipsel"
;;
- hppa|hppa1_1|ia64|m68k|rs6000|s390|s390x|sparc64|vax)
+ hppa|hppa1_1|ia64|m68k|powerpc64le|rs6000|s390|s390x|sparc64|vax)
test -z "[$]2" || eval "[$]2=ArchUnknown"
;;
*)
@@ -451,6 +454,8 @@ AC_DEFUN([FP_SETTINGS],
then
mingw_bin_prefix=mingw/bin/
SettingsCCompilerCommand="\$topdir/../${mingw_bin_prefix}gcc.exe"
+ SettingsHaskellCPPCommand="\$topdir/../${mingw_bin_prefix}gcc.exe"
+ SettingsHaskellCPPFlags="$HaskellCPPArgs"
SettingsLdCommand="\$topdir/../${mingw_bin_prefix}ld.exe"
SettingsArCommand="\$topdir/../${mingw_bin_prefix}ar.exe"
SettingsPerlCommand='$topdir/../perl/perl.exe'
@@ -459,6 +464,8 @@ AC_DEFUN([FP_SETTINGS],
SettingsTouchCommand='$topdir/touchy.exe'
else
SettingsCCompilerCommand="$WhatGccIsCalled"
+ SettingsHaskellCPPCommand="$HaskellCPPCmd"
+ SettingsHaskellCPPFlags="$HaskellCPPArgs"
SettingsLdCommand="$LdCmd"
SettingsArCommand="$ArCmd"
SettingsPerlCommand="$PerlCmd"
@@ -483,6 +490,8 @@ AC_DEFUN([FP_SETTINGS],
SettingsCCompilerLinkFlags="$CONF_GCC_LINKER_OPTS_STAGE2"
SettingsLdFlags="$CONF_LD_LINKER_OPTS_STAGE2"
AC_SUBST(SettingsCCompilerCommand)
+ AC_SUBST(SettingsHaskellCPPCommand)
+ AC_SUBST(SettingsHaskellCPPFlags)
AC_SUBST(SettingsCCompilerFlags)
AC_SUBST(SettingsCCompilerLinkFlags)
AC_SUBST(SettingsLdCommand)
@@ -517,6 +526,12 @@ AC_DEFUN([FPTOOLS_SET_C_LD_FLAGS],
esac
case $$1 in
+ i386-unknown-mingw32)
+ $2="$$2 -march=i686"
+ ;;
+ i386-portbld-freebsd*)
+ $2="$$2 -march=i686"
+ ;;
i386-apple-darwin)
$2="$$2 -m32"
$3="$$3 -m32"
@@ -529,6 +544,12 @@ AC_DEFUN([FPTOOLS_SET_C_LD_FLAGS],
$4="$$4 -arch x86_64"
$5="$$5 -m64"
;;
+ x86_64-unknown-solaris2)
+ $2="$$2 -m64"
+ $3="$$3 -m64"
+ $4="$$4 -m64"
+ $5="$$5 -m64"
+ ;;
alpha-*)
# For now, to suppress the gcc warning "call-clobbered
# register used for global register variable", we simply
@@ -643,6 +664,10 @@ AC_ARG_WITH($2,
else
$1=$withval
fi
+
+ # Remember that we set this manually. Used to override CC_STAGE0
+ # and friends later, if we are not cross-compiling.
+ With_$2=$withval
],
[
if test "$HostOS" != "mingw32"
@@ -685,6 +710,10 @@ AC_ARG_WITH($2,
else
$1=$withval
fi
+
+ # Remember that we set this manually. Used to override CC_STAGE0
+ # and friends later, if we are not cross-compiling.
+ With_$2=$withval
],
[
if test "$HostOS" != "mingw32"
@@ -695,6 +724,8 @@ AC_ARG_WITH($2,
)
]) # FP_ARG_WITH_PATH_GNU_PROG_OPTIONAL
+
+
# FP_PROG_CONTEXT_DIFF
# --------------------
# Figure out how to do context diffs. Sets the output variable ContextDiffCmd.
@@ -864,7 +895,7 @@ else
fi;
changequote([, ])dnl
])
-if test ! -f compiler/parser/Parser.hs || test ! -f compiler/cmm/CmmParse.hs || test ! -f compiler/parser/ParserCore.hs
+if test ! -f compiler/parser/Parser.hs || test ! -f compiler/cmm/CmmParse.hs
then
FP_COMPARE_VERSIONS([$fptools_cv_happy_version],[-lt],[1.19],
[AC_MSG_ERROR([Happy version 1.19 or later is required to compile GHC.])])[]
@@ -1137,6 +1168,16 @@ AC_DEFUN([FP_PROG_AR_NEEDS_RANLIB],[
esac
fi
+ # workaround for AC_PROG_RANLIB which sets RANLIB to `:' when
+ # ranlib is missing on the target OS. The problem is that
+ # ghc-cabal cannot execute `:' which is a shell built-in but can
+ # execute `true' which is usually simple program supported by the
+ # OS.
+ # Fixes #8795
+ if test "$RANLIB" = ":"
+ then
+ RANLIB="true"
+ fi
REAL_RANLIB_CMD="$RANLIB"
if test $fp_cv_prog_ar_needs_ranlib = yes
then
@@ -1788,7 +1829,12 @@ AC_MSG_NOTICE(Building in-tree ghc-pwd)
dnl except we don't want to have to know what make is called. Sigh.
rm -rf utils/ghc-pwd/dist-boot
mkdir utils/ghc-pwd/dist-boot
- if ! "$WithGhc" -v0 -no-user-$GHC_PACKAGE_DB_FLAG -hidir utils/ghc-pwd/dist-boot -odir utils/ghc-pwd/dist-boot -stubdir utils/ghc-pwd/dist-boot --make utils/ghc-pwd/Main.hs -o utils/ghc-pwd/dist-boot/ghc-pwd
+ dnl If special linker flags are needed to build things, then allow
+ dnl the user to pass them in via LDFLAGS.
+ changequote(, )dnl
+ GHC_LDFLAGS=`echo $LDFLAGS | sed -r 's/(^| )([^ ])/\1-optl\2/g'`
+ changequote([, ])dnl
+ if ! "$WithGhc" $GHC_LDFLAGS -v0 -no-user-$GHC_PACKAGE_DB_FLAG -hidir utils/ghc-pwd/dist-boot -odir utils/ghc-pwd/dist-boot -stubdir utils/ghc-pwd/dist-boot --make utils/ghc-pwd/Main.hs -o utils/ghc-pwd/dist-boot/ghc-pwd
then
AC_MSG_ERROR([Building ghc-pwd failed])
fi
@@ -1831,6 +1877,9 @@ AC_MSG_CHECKING(for path to top of build tree)
# converts cpu from gnu to ghc naming, and assigns the result to $target_var
AC_DEFUN([GHC_CONVERT_CPU],[
case "$1" in
+ aarch64*)
+ $2="aarch64"
+ ;;
alpha*)
$2="alpha"
;;
@@ -1861,6 +1910,9 @@ case "$1" in
mips*)
$2="mips"
;;
+ powerpc64le*)
+ $2="powerpc64le"
+ ;;
powerpc64*)
$2="powerpc64"
;;
@@ -2017,7 +2069,11 @@ AC_DEFUN([FIND_LLVM_PROG],[
IFS=":;"
for p in ${PATH}; do
if test -d "${p}"; then
- $1=`${FindCmd} "${p}" -type f -perm +111 -maxdepth 1 -regex '.*/$3-[[0-9]]\.[[0-9]]' -or -type l -perm +111 -maxdepth 1 -regex '.*/$3-[[0-9]]\.[[0-9]]' | ${SortCmd} -n | tail -1`
+ if test "$windows" = YES; then
+ $1=`${FindCmd} "${p}" -type f -maxdepth 1 -regex '.*/$3-[[0-9]]\.[[0-9]]' -or -type l -maxdepth 1 -regex '.*/$3-[[0-9]]\.[[0-9]]' | ${SortCmd} -n | tail -1`
+ else
+ $1=`${FindCmd} "${p}" -type f -perm +111 -maxdepth 1 -regex '.*/$3-[[0-9]]\.[[0-9]]' -or -type l -perm +111 -maxdepth 1 -regex '.*/$3-[[0-9]]\.[[0-9]]' | ${SortCmd} -n | tail -1`
+ fi
if test -n "$$1"; then
break
fi
@@ -2048,7 +2104,8 @@ AC_DEFUN([FIND_GCC],[
$1="$CC"
else
FP_ARG_WITH_PATH_GNU_PROG_OPTIONAL([$1], [$2], [$3])
- # From Xcode 5 on, OS X command line tools do not include gcc anymore. Use clang.
+ # From Xcode 5 on, OS X command line tools do not include gcc
+ # anymore. Use clang.
if test -z "$$1"
then
FP_ARG_WITH_PATH_GNU_PROG_OPTIONAL([$1], [clang], [clang])
@@ -2061,4 +2118,13 @@ AC_DEFUN([FIND_GCC],[
AC_SUBST($1)
])
+AC_DEFUN([MAYBE_OVERRIDE_STAGE0],[
+ if test ! -z "$With_$1" -a "$CrossCompiling" != "YES"; then
+ AC_MSG_NOTICE([Not cross-compiling, so --with-$1 also sets $2])
+ $2=$With_$1
+ fi
+])
+
+
+
# LocalWords: fi
diff --git a/bindisttest/Makefile b/bindisttest/Makefile
index ecd029afd9b1..bc805c849415 100644
--- a/bindisttest/Makefile
+++ b/bindisttest/Makefile
@@ -21,9 +21,9 @@ include $(TOP)/mk/tree.mk
include $(TOP)/mk/config.mk
ifeq "$(TEST_PREP)" "YES"
-BIN_DIST_TEST_TAR_BZ2 = ../$(BIN_DIST_PREP_TAR_BZ2)
+BIN_DIST_TEST_TAR_COMP = ../$(BIN_DIST_PREP_TAR_COMP)
else
-BIN_DIST_TEST_TAR_BZ2 = ../$(BIN_DIST_TAR_BZ2)
+BIN_DIST_TEST_TAR_COMP = ../$(BIN_DIST_TAR_COMP)
endif
all:
@@ -36,7 +36,7 @@ all:
# NB. tar has funny interpretation of filenames sometimes (thinking
# c:/foo is a remote file), so it's safer to bzip and then pipe into
# tar rather than using tar -xjf:
- cd a/b/c/ && $(BZIP2_CMD) -cd ../../../$(BIN_DIST_TEST_TAR_BZ2) | $(TAR_CMD) -xf -
+ cd a/b/c/ && $(TAR_COMP_CMD) -cd ../../../$(BIN_DIST_TEST_TAR_COMP) | $(TAR_CMD) -xf -
ifeq "$(Windows)" "YES"
mv a/b/c/$(BIN_DIST_NAME) $(BIN_DIST_INST_DIR)
else
diff --git a/bindisttest/ghc.mk b/bindisttest/ghc.mk
index 0126e391d87f..4b211064b127 100644
--- a/bindisttest/ghc.mk
+++ b/bindisttest/ghc.mk
@@ -11,9 +11,9 @@
# -----------------------------------------------------------------------------
ifeq "$(TEST_PREP)" "YES"
-BIN_DIST_TEST_TAR_BZ2 = $(BIN_DIST_PREP_TAR_BZ2)
+BIN_DIST_TEST_TAR_COMP = $(BIN_DIST_PREP_TAR_COMP)
else
-BIN_DIST_TEST_TAR_BZ2 = $(BIN_DIST_TAR_BZ2)
+BIN_DIST_TEST_TAR_COMP = $(BIN_DIST_TAR_COMP)
endif
.PHONY: test_bindist
@@ -33,7 +33,7 @@ test_bindist:
mkdir bindisttest/a
mkdir bindisttest/a/b
mkdir bindisttest/a/b/c
- cd bindisttest/a/b/c/ && $(BZIP2_CMD) -cd ../../../../$(BIN_DIST_TEST_TAR_BZ2) | $(TAR_CMD) -xf -
+ cd bindisttest/a/b/c/ && $(TAR_COMP_CMD) -cd ../../../../$(BIN_DIST_TEST_TAR_COMP) | $(TAR_CMD) -xf -
$(SHELL) bindisttest/checkBinaries.sh $(ProjectVersion)
ifeq "$(Windows_Host)" "YES"
mv bindisttest/a/b/c/$(BIN_DIST_NAME) $(BIN_DIST_INST_DIR)
diff --git a/boot b/boot
index 676f434bdb06..8977eaf9a39d 100755
--- a/boot
+++ b/boot
@@ -1,5 +1,6 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
+use warnings;
use strict;
use Cwd;
diff --git a/compiler/basicTypes/BasicTypes.lhs b/compiler/basicTypes/BasicTypes.lhs
index 9a92b003bc38..2f86db77963b 100644
--- a/compiler/basicTypes/BasicTypes.lhs
+++ b/compiler/basicTypes/BasicTypes.lhs
@@ -41,7 +41,8 @@ module BasicTypes(
TopLevelFlag(..), isTopLevel, isNotTopLevel,
- OverlapFlag(..),
+ OverlapFlag(..), OverlapMode(..), setOverlapModeMaybe,
+ hasOverlappingFlag, hasOverlappableFlag,
Boxity(..), isBoxed,
@@ -447,39 +448,92 @@ instance Outputable Origin where
-- | The semantics allowed for overlapping instances for a particular
-- instance. See Note [Safe Haskell isSafeOverlap] (in `InstEnv.lhs`) for a
-- explanation of the `isSafeOverlap` field.
-data OverlapFlag
- -- | This instance must not overlap another
- = NoOverlap { isSafeOverlap :: Bool }
-
- -- | Silently ignore this instance if you find a
- -- more specific one that matches the constraint
- -- you are trying to resolve
- --
- -- Example: constraint (Foo [Int])
- -- instances (Foo [Int])
- -- (Foo [a]) OverlapOk
- -- Since the second instance has the OverlapOk flag,
- -- the first instance will be chosen (otherwise
- -- its ambiguous which to choose)
- | OverlapOk { isSafeOverlap :: Bool }
-
- -- | Silently ignore this instance if you find any other that matches the
- -- constraing you are trying to resolve, including when checking if there are
- -- instances that do not match, but unify.
- --
- -- Example: constraint (Foo [b])
- -- instances (Foo [Int]) Incoherent
- -- (Foo [a])
- -- Without the Incoherent flag, we'd complain that
- -- instantiating 'b' would change which instance
- -- was chosen. See also note [Incoherent instances]
- | Incoherent { isSafeOverlap :: Bool }
+data OverlapFlag = OverlapFlag
+ { overlapMode :: OverlapMode
+ , isSafeOverlap :: Bool
+ } deriving (Eq, Data, Typeable)
+
+setOverlapModeMaybe :: OverlapFlag -> Maybe OverlapMode -> OverlapFlag
+setOverlapModeMaybe f Nothing = f
+setOverlapModeMaybe f (Just m) = f { overlapMode = m }
+
+hasOverlappableFlag :: OverlapMode -> Bool
+hasOverlappableFlag mode =
+ case mode of
+ Overlappable -> True
+ Overlaps -> True
+ Incoherent -> True
+ _ -> False
+
+hasOverlappingFlag :: OverlapMode -> Bool
+hasOverlappingFlag mode =
+ case mode of
+ Overlapping -> True
+ Overlaps -> True
+ Incoherent -> True
+ _ -> False
+
+data OverlapMode -- See Note [Rules for instance lookup] in InstEnv
+ = NoOverlap
+ -- ^ This instance must not overlap another `NoOverlap` instance.
+ -- However, it may be overlapped by `Overlapping` instances,
+ -- and it may overlap `Overlappable` instances.
+
+
+ | Overlappable
+ -- ^ Silently ignore this instance if you find a
+ -- more specific one that matches the constraint
+ -- you are trying to resolve
+ --
+ -- Example: constraint (Foo [Int])
+ -- instance Foo [Int]
+ -- instance {-# OVERLAPPABLE #-} Foo [a]
+ --
+ -- Since the second instance has the Overlappable flag,
+ -- the first instance will be chosen (otherwise
+ -- it's ambiguous which to choose)
+
+
+ | Overlapping
+ -- ^ Silently ignore any more general instances that may be
+ -- used to solve the constraint.
+ --
+ -- Example: constraint (Foo [Int])
+ -- instance {-# OVERLAPPING #-} Foo [Int]
+ -- instance Foo [a]
+ --
+ -- Since the first instance has the Overlapping flag,
+ -- the second---more general---instance will be ignored (otherwise
+ -- it is ambiguous which to choose)
+
+
+ | Overlaps
+ -- ^ Equivalent to having both `Overlapping` and `Overlappable` flags.
+
+ | Incoherent
+ -- ^ Behave like Overlappable and Overlapping, and in addition pick
+ -- an an arbitrary one if there are multiple matching candidates, and
+ -- don't worry about later instantiation
+ --
+ -- Example: constraint (Foo [b])
+ -- instance {-# INCOHERENT #-} Foo [Int]
+ -- instance Foo [a]
+ -- Without the Incoherent flag, we'd complain that
+ -- instantiating 'b' would change which instance
+ -- was chosen. See also note [Incoherent instances] in InstEnv
+
deriving (Eq, Data, Typeable)
+
instance Outputable OverlapFlag where
- ppr (NoOverlap b) = empty <+> pprSafeOverlap b
- ppr (OverlapOk b) = ptext (sLit "[overlap ok]") <+> pprSafeOverlap b
- ppr (Incoherent b) = ptext (sLit "[incoherent]") <+> pprSafeOverlap b
+ ppr flag = ppr (overlapMode flag) <+> pprSafeOverlap (isSafeOverlap flag)
+
+instance Outputable OverlapMode where
+ ppr NoOverlap = empty
+ ppr Overlappable = ptext (sLit "[overlappable]")
+ ppr Overlapping = ptext (sLit "[overlapping]")
+ ppr Overlaps = ptext (sLit "[overlap ok]")
+ ppr Incoherent = ptext (sLit "[incoherent]")
pprSafeOverlap :: Bool -> SDoc
pprSafeOverlap True = ptext $ sLit "[safe]"
@@ -761,7 +815,7 @@ data InlinePragma -- Note [InlinePragma]
, inl_rule :: RuleMatchInfo -- Should the function be treated like a constructor?
} deriving( Eq, Data, Typeable )
-data InlineSpec -- What the user's INLINE pragama looked like
+data InlineSpec -- What the user's INLINE pragma looked like
= Inline
| Inlinable
| NoInline
diff --git a/compiler/basicTypes/ConLike.lhs b/compiler/basicTypes/ConLike.lhs
index de10d0fb0a2c..3414aa4230e4 100644
--- a/compiler/basicTypes/ConLike.lhs
+++ b/compiler/basicTypes/ConLike.lhs
@@ -5,6 +5,7 @@
\section[ConLike]{@ConLike@: Constructor-like things}
\begin{code}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
module ConLike (
ConLike(..)
diff --git a/compiler/basicTypes/DataCon.lhs b/compiler/basicTypes/DataCon.lhs
index ad56290694ad..771aa303a141 100644
--- a/compiler/basicTypes/DataCon.lhs
+++ b/compiler/basicTypes/DataCon.lhs
@@ -5,7 +5,8 @@
\section[DataCon]{@DataCon@: Data Constructors}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -941,7 +942,7 @@ dataConRepArgTys (MkData { dcRep = rep
-- to its info table and used by the GHCi debugger and the heap profiler
dataConIdentity :: DataCon -> [Word8]
-- We want this string to be UTF-8, so we get the bytes directly from the FastStrings.
-dataConIdentity dc = bytesFS (packageIdFS (modulePackageId mod)) ++
+dataConIdentity dc = bytesFS (packageKeyFS (modulePackageKey mod)) ++
fromIntegral (ord ':') : bytesFS (moduleNameFS (moduleName mod)) ++
fromIntegral (ord '.') : bytesFS (occNameFS (nameOccName name))
where name = dataConName dc
diff --git a/compiler/basicTypes/Demand.lhs b/compiler/basicTypes/Demand.lhs
index 9607a159c28b..ed055b58084f 100644
--- a/compiler/basicTypes/Demand.lhs
+++ b/compiler/basicTypes/Demand.lhs
@@ -5,6 +5,7 @@
\section[Demand]{@Demand@: A decoupled implementation of a demand domain}
\begin{code}
+{-# LANGUAGE CPP, FlexibleInstances, TypeSynonymInstances #-}
module Demand (
StrDmd, UseDmd(..), Count(..),
@@ -41,9 +42,10 @@ module Demand (
deferAfterIO,
postProcessUnsat, postProcessDmdTypeM,
- splitProdDmd, splitProdDmd_maybe, peelCallDmd, mkCallDmd,
+ splitProdDmd_maybe, peelCallDmd, mkCallDmd,
dmdTransformSig, dmdTransformDataConSig, dmdTransformDictSelSig,
argOneShots, argsOneShots,
+ trimToType, TypeShape(..),
isSingleUsed, reuseEnv, zapDemand, zapStrictSig,
@@ -64,9 +66,10 @@ import BasicTypes
import Binary
import Maybes ( orElse )
-import Type ( Type )
+import Type ( Type, isUnLiftedType )
import TyCon ( isNewTyCon, isClassTyCon )
import DataCon ( splitDataProductType_maybe )
+import FastString
\end{code}
%************************************************************************
@@ -198,11 +201,13 @@ seqMaybeStr Lazy = ()
seqMaybeStr (Str s) = seqStrDmd s
-- Splitting polymorphic demands
-splitStrProdDmd :: Int -> StrDmd -> [MaybeStr]
-splitStrProdDmd n HyperStr = replicate n strBot
-splitStrProdDmd n HeadStr = replicate n strTop
-splitStrProdDmd n (SProd ds) = ASSERT( ds `lengthIs` n) ds
-splitStrProdDmd _ d@(SCall {}) = pprPanic "attempt to prod-split strictness call demand" (ppr d)
+splitStrProdDmd :: Int -> StrDmd -> Maybe [MaybeStr]
+splitStrProdDmd n HyperStr = Just (replicate n strBot)
+splitStrProdDmd n HeadStr = Just (replicate n strTop)
+splitStrProdDmd n (SProd ds) = ASSERT( ds `lengthIs` n) Just ds
+splitStrProdDmd _ (SCall {}) = Nothing
+ -- This can happen when the programmer uses unsafeCoerce,
+ -- and we don't then want to crash the compiler (Trac #9208)
\end{code}
%************************************************************************
@@ -439,13 +444,15 @@ seqMaybeUsed (Use c u) = c `seq` seqUseDmd u
seqMaybeUsed _ = ()
-- Splitting polymorphic Maybe-Used demands
-splitUseProdDmd :: Int -> UseDmd -> [MaybeUsed]
-splitUseProdDmd n Used = replicate n useTop
-splitUseProdDmd n UHead = replicate n Abs
-splitUseProdDmd n (UProd ds) = ASSERT2( ds `lengthIs` n, ppr n $$ ppr ds ) ds
-splitUseProdDmd _ d@(UCall _ _) = pprPanic "attempt to prod-split usage call demand" (ppr d)
+splitUseProdDmd :: Int -> UseDmd -> Maybe [MaybeUsed]
+splitUseProdDmd n Used = Just (replicate n useTop)
+splitUseProdDmd n UHead = Just (replicate n Abs)
+splitUseProdDmd n (UProd ds) = ASSERT2( ds `lengthIs` n, text "splitUseProdDmd" $$ ppr n $$ ppr ds )
+ Just ds
+splitUseProdDmd _ (UCall _ _) = Nothing
+ -- This can happen when the programmer uses unsafeCoerce,
+ -- and we don't then want to crash the compiler (Trac #9208)
\end{code}
-
%************************************************************************
%* *
\subsection{Joint domain for Strictness and Absence}
@@ -638,8 +645,66 @@ isSingleUsed (JD {absd=a}) = is_used_once a
is_used_once Abs = True
is_used_once (Use One _) = True
is_used_once _ = False
+
+
+data TypeShape = TsFun TypeShape
+ | TsProd [TypeShape]
+ | TsUnk
+
+instance Outputable TypeShape where
+ ppr TsUnk = ptext (sLit "TsUnk")
+ ppr (TsFun ts) = ptext (sLit "TsFun") <> parens (ppr ts)
+ ppr (TsProd tss) = parens (hsep $ punctuate comma $ map ppr tss)
+
+trimToType :: JointDmd -> TypeShape -> JointDmd
+-- See Note [Trimming a demand to a type]
+trimToType (JD ms mu) ts
+ = JD (go_ms ms ts) (go_mu mu ts)
+ where
+ go_ms :: MaybeStr -> TypeShape -> MaybeStr
+ go_ms Lazy _ = Lazy
+ go_ms (Str s) ts = Str (go_s s ts)
+
+ go_s :: StrDmd -> TypeShape -> StrDmd
+ go_s HyperStr _ = HyperStr
+ go_s (SCall s) (TsFun ts) = SCall (go_s s ts)
+ go_s (SProd mss) (TsProd tss)
+ | equalLength mss tss = SProd (zipWith go_ms mss tss)
+ go_s _ _ = HeadStr
+
+ go_mu :: MaybeUsed -> TypeShape -> MaybeUsed
+ go_mu Abs _ = Abs
+ go_mu (Use c u) ts = Use c (go_u u ts)
+
+ go_u :: UseDmd -> TypeShape -> UseDmd
+ go_u UHead _ = UHead
+ go_u (UCall c u) (TsFun ts) = UCall c (go_u u ts)
+ go_u (UProd mus) (TsProd tss)
+ | equalLength mus tss = UProd (zipWith go_mu mus tss)
+ go_u _ _ = Used
\end{code}
+Note [Trimming a demand to a type]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Consider this:
+
+ f :: a -> Bool
+ f x = case ... of
+ A g1 -> case (x |> g1) of (p,q) -> ...
+ B -> error "urk"
+
+where A,B are the constructors of a GADT. We'll get a U(U,U) demand
+on x from the A branch, but that's a stupid demand for x itself, which
+has type 'a'. Indeed we get ASSERTs going off (notably in
+splitUseProdDmd, Trac #8569).
+
+Bottom line: we really don't want to have a binder whose demand is more
+deeply-nested than its type. There are various ways to tackle this.
+When processing (x |> g1), we could "trim" the incoming demand U(U,U)
+to match x's type. But I'm currently doing so just at the moment when
+we pin a demand on a binder, in DmdAnal.findBndrDmd.
+
+
Note [Threshold demands]
~~~~~~~~~~~~~~~~~~~~~~~~
Threshold usage demand is generated to figure out if
@@ -659,26 +724,18 @@ can be expanded to saturate a callee's arity.
\begin{code}
-splitProdDmd :: Arity -> JointDmd -> [JointDmd]
-splitProdDmd n (JD {strd = s, absd = u})
- = mkJointDmds (split_str s) (split_abs u)
- where
- split_str Lazy = replicate n Lazy
- split_str (Str s) = splitStrProdDmd n s
-
- split_abs Abs = replicate n Abs
- split_abs (Use _ u) = splitUseProdDmd n u
-
splitProdDmd_maybe :: JointDmd -> Maybe [JointDmd]
-- Split a product into its components, iff there is any
-- useful information to be extracted thereby
-- The demand is not necessarily strict!
splitProdDmd_maybe (JD {strd = s, absd = u})
= case (s,u) of
- (Str (SProd sx), Use _ u) -> Just (mkJointDmds sx (splitUseProdDmd (length sx) u))
- (Str s, Use _ (UProd ux)) -> Just (mkJointDmds (splitStrProdDmd (length ux) s) ux)
- (Lazy, Use _ (UProd ux)) -> Just (mkJointDmds (replicate (length ux) Lazy) ux)
- _ -> Nothing
+ (Str (SProd sx), Use _ u) | Just ux <- splitUseProdDmd (length sx) u
+ -> Just (mkJointDmds sx ux)
+ (Str s, Use _ (UProd ux)) | Just sx <- splitStrProdDmd (length ux) s
+ -> Just (mkJointDmds sx ux)
+ (Lazy, Use _ (UProd ux)) -> Just (mkJointDmds (replicate (length ux) Lazy) ux)
+ _ -> Nothing
\end{code}
%************************************************************************
@@ -960,7 +1017,7 @@ this has a strictness signature of
meaning that "b2 `seq` ()" and "b2 1 `seq` ()" might well terminate, but
for "b2 1 2 `seq` ()" we get definite divergence.
-For comparision,
+For comparison,
b1 x = x `seq` error (show x)
has a strictness signature of
b
@@ -1144,13 +1201,18 @@ type DeferAndUse -- Describes how to degrade a result type
type DeferAndUseM = Maybe DeferAndUse
-- Nothing <=> absent-ify the result type; it will never be used
-toCleanDmd :: Demand -> (CleanDemand, DeferAndUseM)
--- See Note [Analyzing with lazy demand and lambdas]
-toCleanDmd (JD { strd = s, absd = u })
+toCleanDmd :: Demand -> Type -> (CleanDemand, DeferAndUseM)
+toCleanDmd (JD { strd = s, absd = u }) expr_ty
= case (s,u) of
- (Str s', Use c u') -> (CD { sd = s', ud = u' }, Just (False, c))
- (Lazy, Use c u') -> (CD { sd = HeadStr, ud = u' }, Just (True, c))
- (_, Abs) -> (CD { sd = HeadStr, ud = Used }, Nothing)
+ (Str s', Use c u') -> -- The normal case
+ (CD { sd = s', ud = u' }, Just (False, c))
+
+ (Lazy, Use c u') -> -- See Note [Analyzing with lazy demand and lambdas]
+ (CD { sd = HeadStr, ud = u' }, Just (True, c))
+
+ (_, Abs) -- See Note [Analysing with absent demand]
+ | isUnLiftedType expr_ty -> (CD { sd = HeadStr, ud = Used }, Just (False, One))
+ | otherwise -> (CD { sd = HeadStr, ud = Used }, Nothing)
-- This is used in dmdAnalStar when post-processing
-- a function's argument demand. So we only care about what
@@ -1325,13 +1387,13 @@ cardinality analysis of the following example:
{-# NOINLINE build #-}
build g = (g (:) [], g (:) [])
-h c z = build (\x ->
- let z1 = z ++ z
+h c z = build (\x ->
+ let z1 = z ++ z
in if c
then \y -> x (y ++ z1)
else \y -> x (z1 ++ y))
-One can see that `build` assigns to `g` demand .
+One can see that `build` assigns to `g` demand .
Therefore, when analyzing the lambda `(\x -> ...)`, we
expect each lambda \y -> ... to be annotated as "one-shot"
one. Therefore (\x -> \y -> x (y ++ z)) should be analyzed with a
@@ -1340,6 +1402,46 @@ demand .
This is achieved by, first, converting the lazy demand L into the
strict S by the second clause of the analysis.
+Note [Analysing with absent demand]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Suppose we analyse an expression with demand . The "A" means
+"absent", so this expression will never be needed. What should happen?
+There are several wrinkles:
+
+* We *do* want to analyse the expression regardless.
+ Reason: Note [Always analyse in virgin pass]
+
+ But we can post-process the results to ignore all the usage
+ demands coming back. This is done by postProcessDmdTypeM.
+
+* But in the case of an *unlifted type* we must be extra careful,
+ because unlifted values are evaluated even if they are not used.
+ Example (see Trac #9254):
+ f :: (() -> (# Int#, () #)) -> ()
+ -- Strictness signature is
+ --
+ -- I.e. calls k, but discards first component of result
+ f k = case k () of (# _, r #) -> r
+
+ g :: Int -> ()
+ g y = f (\n -> (# case y of I# y2 -> y2, n #))
+
+ Here f's strictness signature says (correctly) that it calls its
+ argument function and ignores the first component of its result.
+ This is correct in the sense that it'd be fine to (say) modify the
+ function so that it always returned 0# in the first component.
+
+ But in function g, we *will* evaluate the 'case y of ...', because
+ it has type Int#. So 'y' will be evaluated. So we must record this
+ usage of 'y', else 'g' will say 'y' is absent, and will w/w so that
+ 'y' is bound to an aBSENT_ERROR thunk.
+
+ An alternative would be to replace the 'case y of ...' with (say) 0#,
+ but I have not tried that. It's not a common situation, but it is
+ not theoretical: unsafePerformIO's implementation is very very like
+ 'f' above.
+
+
%************************************************************************
%* *
Demand signatures
@@ -1451,7 +1553,7 @@ dmdTransformDataConSig :: Arity -> StrictSig -> CleanDemand -> DmdType
-- which has a special kind of demand transformer.
-- If the constructor is saturated, we feed the demand on
-- the result into the constructor arguments.
-dmdTransformDataConSig arity (StrictSig (DmdType _ _ con_res))
+dmdTransformDataConSig arity (StrictSig (DmdType _ _ con_res))
(CD { sd = str, ud = abs })
| Just str_dmds <- go_str arity str
, Just abs_dmds <- go_abs arity abs
@@ -1461,12 +1563,12 @@ dmdTransformDataConSig arity (StrictSig (DmdType _ _ con_res))
| otherwise -- Not saturated
= nopDmdType
where
- go_str 0 dmd = Just (splitStrProdDmd arity dmd)
+ go_str 0 dmd = splitStrProdDmd arity dmd
go_str n (SCall s') = go_str (n-1) s'
go_str n HyperStr = go_str (n-1) HyperStr
go_str _ _ = Nothing
- go_abs 0 dmd = Just (splitUseProdDmd arity dmd)
+ go_abs 0 dmd = splitUseProdDmd arity dmd
go_abs n (UCall One u') = go_abs (n-1) u'
go_abs _ _ = Nothing
diff --git a/compiler/basicTypes/Id.lhs b/compiler/basicTypes/Id.lhs
index 50b36419585b..85e9b3083a21 100644
--- a/compiler/basicTypes/Id.lhs
+++ b/compiler/basicTypes/Id.lhs
@@ -5,6 +5,8 @@
\section[Id]{@Ids@: Value and constructor identifiers}
\begin{code}
+{-# LANGUAGE CPP #-}
+
-- |
-- #name_types#
-- GHC uses several kinds of name internally:
@@ -30,6 +32,7 @@ module Id (
mkGlobalId, mkVanillaGlobal, mkVanillaGlobalWithInfo,
mkLocalId, mkLocalIdWithInfo, mkExportedLocalId,
mkSysLocal, mkSysLocalM, mkUserLocal, mkUserLocalM,
+ mkDerivedLocalM,
mkTemplateLocals, mkTemplateLocalsNum, mkTemplateLocal,
mkWorkerId, mkWiredInIdName,
@@ -71,7 +74,8 @@ module Id (
isStateHackType, stateHackOneShot, typeOneShot,
-- ** Reading 'IdInfo' fields
- idArity,
+ idArity,
+ idCallArity,
idUnfolding, realIdUnfolding,
idSpecialisation, idCoreRules, idHasRules,
idCafInfo,
@@ -82,6 +86,7 @@ module Id (
setIdUnfoldingLazily,
setIdUnfolding,
setIdArity,
+ setIdCallArity,
setIdSpecialisation,
setIdCafInfo,
@@ -131,6 +136,7 @@ import StaticFlags
infixl 1 `setIdUnfoldingLazily`,
`setIdUnfolding`,
`setIdArity`,
+ `setIdCallArity`,
`setIdOccInfo`,
`setIdOneShotInfo`,
@@ -248,8 +254,9 @@ mkLocalIdWithInfo name ty info = Var.mkLocalVar VanillaId name ty info
-- | Create a local 'Id' that is marked as exported.
-- This prevents things attached to it from being removed as dead code.
-mkExportedLocalId :: Name -> Type -> Id
-mkExportedLocalId name ty = Var.mkExportedLocalVar VanillaId name ty vanillaIdInfo
+-- See Note [Exported LocalIds]
+mkExportedLocalId :: IdDetails -> Name -> Type -> Id
+mkExportedLocalId details name ty = Var.mkExportedLocalVar details name ty vanillaIdInfo
-- Note [Free type variables]
@@ -269,6 +276,10 @@ mkUserLocal occ uniq ty loc = mkLocalId (mkInternalName uniq occ loc) ty
mkUserLocalM :: MonadUnique m => OccName -> Type -> SrcSpan -> m Id
mkUserLocalM occ ty loc = getUniqueM >>= (\uniq -> return (mkUserLocal occ uniq ty loc))
+mkDerivedLocalM :: MonadUnique m => (OccName -> OccName) -> Id -> Type -> m Id
+mkDerivedLocalM deriv_name id ty
+ = getUniqueM >>= (\uniq -> return (mkLocalId (mkDerivedInternalName deriv_name uniq (getName id)) ty))
+
mkWiredInIdName :: Module -> FastString -> Unique -> Id -> Name
mkWiredInIdName mod fs uniq id
= mkWiredInName mod (mkOccNameFS varName fs) uniq (AnId id) UserSyntax
@@ -297,6 +308,40 @@ mkTemplateLocalsNum :: Int -> [Type] -> [Id]
mkTemplateLocalsNum n tys = zipWith mkTemplateLocal [n..] tys
\end{code}
+Note [Exported LocalIds]
+~~~~~~~~~~~~~~~~~~~~~~~~
+We use mkExportedLocalId for things like
+ - Dictionary functions (DFunId)
+ - Wrapper and matcher Ids for pattern synonyms
+ - Default methods for classes
+ - etc
+
+They are marked as "exported" in the sense that they should be kept alive
+even if apparently unused in other bindings, and not dropped as dead
+code by the occurrence analyser. (But "exported" here does not mean
+"brought into lexical scope by an import declaration". Indeed these
+things are always internal Ids that the user never sees.)
+
+It's very important that they are *LocalIds*, not GlobalIds, for lots
+of reasons:
+
+ * We want to treat them as free variables for the purpose of
+ dependency analysis (e.g. CoreFVs.exprFreeVars).
+
+ * Look them up in the current substitution when we come across
+ occurrences of them (in Subst.lookupIdSubst)
+
+ * Ensure that for dfuns the specialiser does not float dict uses
+ above their defns, which would prevent good simplifications happening.
+
+ * The strictness analyser treats an occurrence of a GlobalId as
+ imported and assumes it contains strictness in its IdInfo, which
+ isn't true if the thing is bound in the same module as the
+ occurrence.
+
+In CoreTidy we must make all these LocalIds into GlobalIds, so that in
+importing modules (in --make mode) we treat them as properly global.
+That is what is happening in, say tidy_insts in TidyPgm.
%************************************************************************
%* *
@@ -466,6 +511,12 @@ idArity id = arityInfo (idInfo id)
setIdArity :: Id -> Arity -> Id
setIdArity id arity = modifyIdInfo (`setArityInfo` arity) id
+idCallArity :: Id -> Arity
+idCallArity id = callArityInfo (idInfo id)
+
+setIdCallArity :: Id -> Arity -> Id
+setIdCallArity id arity = modifyIdInfo (`setCallArityInfo` arity) id
+
idRepArity :: Id -> RepArity
idRepArity x = typeRepArity (idArity x) (idType x)
diff --git a/compiler/basicTypes/IdInfo.lhs b/compiler/basicTypes/IdInfo.lhs
index 20d9b49cb9e1..f6336480a105 100644
--- a/compiler/basicTypes/IdInfo.lhs
+++ b/compiler/basicTypes/IdInfo.lhs
@@ -8,7 +8,7 @@
Haskell. [WDP 94/11])
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -38,6 +38,8 @@ module IdInfo (
unknownArity,
arityInfo, setArityInfo, ppArityInfo,
+ callArityInfo, setCallArityInfo,
+
-- ** Demand and strictness Info
strictnessInfo, setStrictnessInfo,
demandInfo, setDemandInfo, pprStrictness,
@@ -69,8 +71,6 @@ module IdInfo (
ppCafInfo, mayHaveCafRefs,
cafInfo, setCafInfo,
- -- ** Tick-box Info
- TickBoxOp(..), TickBoxId,
) where
import CoreSyn
@@ -84,7 +84,6 @@ import DataCon
import TyCon
import ForeignCall
import Outputable
-import Module
import FastString
import Demand
@@ -133,8 +132,6 @@ data IdDetails
| PrimOpId PrimOp -- ^ The 'Id' is for a primitive operator
| FCallId ForeignCall -- ^ The 'Id' is for a foreign call
- | TickBoxOpId TickBoxOp -- ^ The 'Id' is for a HPC tick box (both traditional and binary)
-
| DFunId Int Bool -- ^ A dictionary function.
-- Int = the number of "silent" arguments to the dfun
-- e.g. class D a => C a where ...
@@ -163,7 +160,6 @@ pprIdDetails other = brackets (pp other)
pp (ClassOpId {}) = ptext (sLit "ClassOp")
pp (PrimOpId _) = ptext (sLit "PrimOp")
pp (FCallId _) = ptext (sLit "ForeignCall")
- pp (TickBoxOpId _) = ptext (sLit "TickBoxOp")
pp (DFunId ns nt) = ptext (sLit "DFunId")
<> ppWhen (ns /= 0) (brackets (int ns))
<> ppWhen nt (ptext (sLit "(nt)"))
@@ -204,8 +200,9 @@ data IdInfo
strictnessInfo :: StrictSig, -- ^ A strictness signature
- demandInfo :: Demand -- ^ ID demand information
-
+ demandInfo :: Demand, -- ^ ID demand information
+ callArityInfo :: !ArityInfo -- ^ How this is called.
+ -- n <=> all calls have at least n arguments
}
-- | Just evaluate the 'IdInfo' to WHNF
@@ -264,6 +261,8 @@ setUnfoldingInfo info uf
setArityInfo :: IdInfo -> ArityInfo -> IdInfo
setArityInfo info ar = info { arityInfo = ar }
+setCallArityInfo :: IdInfo -> ArityInfo -> IdInfo
+setCallArityInfo info ar = info { callArityInfo = ar }
setCafInfo :: IdInfo -> CafInfo -> IdInfo
setCafInfo info caf = info { cafInfo = caf }
@@ -291,7 +290,8 @@ vanillaIdInfo
inlinePragInfo = defaultInlinePragma,
occInfo = NoOccInfo,
demandInfo = topDmd,
- strictnessInfo = nopSig
+ strictnessInfo = nopSig,
+ callArityInfo = unknownArity
}
-- | More informative 'IdInfo' we can use when we know the 'Id' has no CAF references
@@ -511,20 +511,3 @@ zapFragileInfo info
where
occ = occInfo info
\end{code}
-
-%************************************************************************
-%* *
-\subsection{TickBoxOp}
-%* *
-%************************************************************************
-
-\begin{code}
-type TickBoxId = Int
-
--- | Tick box for Hpc-style coverage
-data TickBoxOp
- = TickBox Module {-# UNPACK #-} !TickBoxId
-
-instance Outputable TickBoxOp where
- ppr (TickBox mod n) = ptext (sLit "tick") <+> ppr (mod,n)
-\end{code}
diff --git a/compiler/basicTypes/Literal.lhs b/compiler/basicTypes/Literal.lhs
index c77915fef66d..13fbb4d46d59 100644
--- a/compiler/basicTypes/Literal.lhs
+++ b/compiler/basicTypes/Literal.lhs
@@ -5,7 +5,7 @@
\section[Literal]{@Literal@: Machine literals (unboxed, of course)}
\begin{code}
-{-# LANGUAGE DeriveDataTypeable #-}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
module Literal
(
diff --git a/compiler/basicTypes/MkId.lhs b/compiler/basicTypes/MkId.lhs
index 604163fd4618..7816ad90056a 100644
--- a/compiler/basicTypes/MkId.lhs
+++ b/compiler/basicTypes/MkId.lhs
@@ -12,7 +12,8 @@ have a standard form, namely:
- primitive operations
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -20,7 +21,7 @@ have a standard form, namely:
-- for details
module MkId (
- mkDictFunId, mkDictFunTy, mkDictSelId,
+ mkDictFunId, mkDictFunTy, mkDictSelId, mkDictSelRhs,
mkPrimOpId, mkFCallId,
@@ -66,7 +67,6 @@ import PrimOp
import ForeignCall
import DataCon
import Id
-import Var ( mkExportedLocalVar )
import IdInfo
import Demand
import CoreSyn
@@ -125,7 +125,7 @@ is right here.
\begin{code}
wiredInIds :: [Id]
wiredInIds
- = [lazyId]
+ = [lazyId, dollarId]
++ errorIds -- Defined in MkCore
++ ghcPrimIds
@@ -272,39 +272,36 @@ at the outside. When dealing with classes it's very convenient to
recover the original type signature from the class op selector.
\begin{code}
-mkDictSelId :: DynFlags
- -> Bool -- True <=> don't include the unfolding
- -- Little point on imports without -O, because the
- -- dictionary itself won't be visible
- -> Name -- Name of one of the *value* selectors
+mkDictSelId :: Name -- Name of one of the *value* selectors
-- (dictionary superclass or method)
-> Class -> Id
-mkDictSelId dflags no_unf name clas
+mkDictSelId name clas
= mkGlobalId (ClassOpId clas) name sel_ty info
where
- sel_ty = mkForAllTys tyvars (mkFunTy (idType dict_id) (idType the_arg_id))
- -- We can't just say (exprType rhs), because that would give a type
- -- C a -> C a
- -- for a single-op class (after all, the selector is the identity)
- -- But it's type must expose the representation of the dictionary
- -- to get (say) C a -> (a -> a)
+ tycon = classTyCon clas
+ sel_names = map idName (classAllSelIds clas)
+ new_tycon = isNewTyCon tycon
+ [data_con] = tyConDataCons tycon
+ tyvars = dataConUnivTyVars data_con
+ arg_tys = dataConRepArgTys data_con -- Includes the dictionary superclasses
+ val_index = assoc "MkId.mkDictSelId" (sel_names `zip` [0..]) name
+
+ sel_ty = mkForAllTys tyvars (mkFunTy (mkClassPred clas (mkTyVarTys tyvars))
+ (getNth arg_tys val_index))
base_info = noCafIdInfo
`setArityInfo` 1
`setStrictnessInfo` strict_sig
- `setUnfoldingInfo` (if no_unf then noUnfolding
- else mkImplicitUnfolding dflags rhs)
- -- In module where class op is defined, we must add
- -- the unfolding, even though it'll never be inlined
- -- because we use that to generate a top-level binding
- -- for the ClassOp
-
- info | new_tycon = base_info `setInlinePragInfo` alwaysInlinePragma
+
+ info | new_tycon
+ = base_info `setInlinePragInfo` alwaysInlinePragma
+ `setUnfoldingInfo` mkInlineUnfolding (Just 1) (mkDictSelRhs clas val_index)
-- See Note [Single-method classes] in TcInstDcls
-- for why alwaysInlinePragma
- | otherwise = base_info `setSpecInfo` mkSpecInfo [rule]
- `setInlinePragInfo` neverInlinePragma
- -- Add a magic BuiltinRule, and never inline it
+
+ | otherwise
+ = base_info `setSpecInfo` mkSpecInfo [rule]
+ -- Add a magic BuiltinRule, but no unfolding
-- so that the rule is always available to fire.
-- See Note [ClassOp/DFun selection] in TcInstDcls
@@ -326,25 +323,26 @@ mkDictSelId dflags no_unf name clas
strict_sig = mkClosedStrictSig [arg_dmd] topRes
arg_dmd | new_tycon = evalDmd
| otherwise = mkManyUsedDmd $
- mkProdDmd [ if the_arg_id == id then evalDmd else absDmd
- | id <- arg_ids ]
-
+ mkProdDmd [ if name == sel_name then evalDmd else absDmd
+ | sel_name <- sel_names ]
+
+mkDictSelRhs :: Class
+ -> Int -- 0-indexed selector among (superclasses ++ methods)
+ -> CoreExpr
+mkDictSelRhs clas val_index
+ = mkLams tyvars (Lam dict_id rhs_body)
+ where
tycon = classTyCon clas
new_tycon = isNewTyCon tycon
[data_con] = tyConDataCons tycon
tyvars = dataConUnivTyVars data_con
arg_tys = dataConRepArgTys data_con -- Includes the dictionary superclasses
- -- 'index' is a 0-index into the *value* arguments of the dictionary
- val_index = assoc "MkId.mkDictSelId" sel_index_prs name
- sel_index_prs = map idName (classAllSelIds clas) `zip` [0..]
-
the_arg_id = getNth arg_ids val_index
pred = mkClassPred clas (mkTyVarTys tyvars)
dict_id = mkTemplateLocal 1 pred
arg_ids = mkTemplateLocalsNum 2 arg_tys
- rhs = mkLams tyvars (Lam dict_id rhs_body)
rhs_body | new_tycon = unwrapNewTypeBody tycon (map mkTyVarTy tyvars) (Var dict_id)
| otherwise = Case (Var dict_id) dict_id (idType the_arg_id)
[(DataAlt data_con, arg_ids, varToCoreExpr the_arg_id)]
@@ -956,29 +954,13 @@ mkFCallId dflags uniq fcall ty
%* *
%************************************************************************
-Important notes about dict funs and default methods
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Note [Dict funs and default methods]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dict funs and default methods are *not* ImplicitIds. Their definition
involves user-written code, so we can't figure out their strictness etc
based on fixed info, as we can for constructors and record selectors (say).
-We build them as LocalIds, but with External Names. This ensures that
-they are taken to account by free-variable finding and dependency
-analysis (e.g. CoreFVs.exprFreeVars).
-
-Why shouldn't they be bound as GlobalIds? Because, in particular, if
-they are globals, the specialiser floats dict uses above their defns,
-which prevents good simplifications happening. Also the strictness
-analyser treats a occurrence of a GlobalId as imported and assumes it
-contains strictness in its IdInfo, which isn't true if the thing is
-bound in the same module as the occurrence.
-
-It's OK for dfuns to be LocalIds, because we form the instance-env to
-pass on to the next module (md_insts) in CoreTidy, afer tidying
-and globalising the top-level Ids.
-
-BUT make sure they are *exported* LocalIds (mkExportedLocalId) so
-that they aren't discarded by the occurrence analyser.
+NB: See also Note [Exported LocalIds] in Id
\begin{code}
mkDictFunId :: Name -- Name to use for the dict fun;
@@ -988,12 +970,12 @@ mkDictFunId :: Name -- Name to use for the dict fun;
-> [Type]
-> Id
-- Implements the DFun Superclass Invariant (see TcInstDcls)
+-- See Note [Dict funs and default methods]
mkDictFunId dfun_name tvs theta clas tys
- = mkExportedLocalVar (DFunId n_silent is_nt)
- dfun_name
- dfun_ty
- vanillaIdInfo
+ = mkExportedLocalId (DFunId n_silent is_nt)
+ dfun_name
+ dfun_ty
where
is_nt = isNewTyCon (classTyCon clas)
(n_silent, dfun_ty) = mkDictFunTy tvs theta clas tys
@@ -1040,20 +1022,32 @@ another gun with which to shoot yourself in the foot.
\begin{code}
lazyIdName, unsafeCoerceName, nullAddrName, seqName,
realWorldName, voidPrimIdName, coercionTokenName,
- magicDictName, coerceName, proxyName :: Name
-unsafeCoerceName = mkWiredInIdName gHC_PRIM (fsLit "unsafeCoerce#") unsafeCoerceIdKey unsafeCoerceId
-nullAddrName = mkWiredInIdName gHC_PRIM (fsLit "nullAddr#") nullAddrIdKey nullAddrId
-seqName = mkWiredInIdName gHC_PRIM (fsLit "seq") seqIdKey seqId
-realWorldName = mkWiredInIdName gHC_PRIM (fsLit "realWorld#") realWorldPrimIdKey realWorldPrimId
-voidPrimIdName = mkWiredInIdName gHC_PRIM (fsLit "void#") voidPrimIdKey voidPrimId
-lazyIdName = mkWiredInIdName gHC_MAGIC (fsLit "lazy") lazyIdKey lazyId
-coercionTokenName = mkWiredInIdName gHC_PRIM (fsLit "coercionToken#") coercionTokenIdKey coercionTokenId
-magicDictName = mkWiredInIdName gHC_PRIM (fsLit "magicDict") magicDictKey magicDictId
-coerceName = mkWiredInIdName gHC_PRIM (fsLit "coerce") coerceKey coerceId
-proxyName = mkWiredInIdName gHC_PRIM (fsLit "proxy#") proxyHashKey proxyHashId
+ magicDictName, coerceName, proxyName, dollarName :: Name
+unsafeCoerceName = mkWiredInIdName gHC_PRIM (fsLit "unsafeCoerce#") unsafeCoerceIdKey unsafeCoerceId
+nullAddrName = mkWiredInIdName gHC_PRIM (fsLit "nullAddr#") nullAddrIdKey nullAddrId
+seqName = mkWiredInIdName gHC_PRIM (fsLit "seq") seqIdKey seqId
+realWorldName = mkWiredInIdName gHC_PRIM (fsLit "realWorld#") realWorldPrimIdKey realWorldPrimId
+voidPrimIdName = mkWiredInIdName gHC_PRIM (fsLit "void#") voidPrimIdKey voidPrimId
+lazyIdName = mkWiredInIdName gHC_MAGIC (fsLit "lazy") lazyIdKey lazyId
+coercionTokenName = mkWiredInIdName gHC_PRIM (fsLit "coercionToken#") coercionTokenIdKey coercionTokenId
+magicDictName = mkWiredInIdName gHC_PRIM (fsLit "magicDict") magicDictKey magicDictId
+coerceName = mkWiredInIdName gHC_PRIM (fsLit "coerce") coerceKey coerceId
+proxyName = mkWiredInIdName gHC_PRIM (fsLit "proxy#") proxyHashKey proxyHashId
+dollarName = mkWiredInIdName gHC_BASE (fsLit "$") dollarIdKey dollarId
\end{code}
\begin{code}
+dollarId :: Id -- Note [dollarId magic]
+dollarId = pcMiscPrelId dollarName ty
+ (noCafIdInfo `setUnfoldingInfo` unf)
+ where
+ fun_ty = mkFunTy alphaTy openBetaTy
+ ty = mkForAllTys [alphaTyVar, openBetaTyVar] $
+ mkFunTy fun_ty fun_ty
+ unf = mkInlineUnfolding (Just 2) rhs
+ [f,x] = mkTemplateLocals [fun_ty, alphaTy]
+ rhs = mkLams [alphaTyVar, openBetaTyVar, f, x] $
+ App (Var f) (Var x)
------------------------------------------------
-- proxy# :: forall a. Proxy# a
@@ -1160,6 +1154,20 @@ coerceId = pcMiscPrelId coerceName ty info
[(DataAlt coercibleDataCon, [eq], Cast (Var x) (CoVarCo eq))]
\end{code}
+Note [dollarId magic]
+~~~~~~~~~~~~~~~~~~~~~
+The only reason that ($) is wired in is so that its type can be
+ forall (a:*, b:Open). (a->b) -> a -> b
+That is, the return type can be unboxed. E.g. this is OK
+ foo $ True where foo :: Bool -> Int#
+because ($) doesn't inspect or move the result of the call to foo.
+See Trac #8739.
+
+There is a special typing rule for ($) in TcExpr, so the type of ($)
+isn't looked at there, BUT Lint subsequently (and rightly) complains
+if sees ($) applied to Int# (say), unless we give it a wired-in type
+as we do here.
+
Note [Unsafe coerce magic]
~~~~~~~~~~~~~~~~~~~~~~~~~~
We define a *primitive*
diff --git a/compiler/basicTypes/Module.lhs b/compiler/basicTypes/Module.lhs
index 90bf717a857d..8f21d66bc18f 100644
--- a/compiler/basicTypes/Module.lhs
+++ b/compiler/basicTypes/Module.lhs
@@ -9,6 +9,7 @@ These are Uniquable, hence we can build Maps with Modules as
the keys.
\begin{code}
+{-# LANGUAGE DeriveDataTypeable #-}
module Module
(
@@ -22,30 +23,31 @@ module Module
mkModuleNameFS,
stableModuleNameCmp,
- -- * The PackageId type
- PackageId,
- fsToPackageId,
- packageIdFS,
- stringToPackageId,
- packageIdString,
- stablePackageIdCmp,
+ -- * The PackageKey type
+ PackageKey,
+ fsToPackageKey,
+ packageKeyFS,
+ stringToPackageKey,
+ packageKeyString,
+ stablePackageKeyCmp,
- -- * Wired-in PackageIds
+ -- * Wired-in PackageKeys
-- $wired_in_packages
- primPackageId,
- integerPackageId,
- basePackageId,
- rtsPackageId,
- thPackageId,
- dphSeqPackageId,
- dphParPackageId,
- mainPackageId,
- thisGhcPackageId,
- interactivePackageId, isInteractiveModule,
+ primPackageKey,
+ integerPackageKey,
+ basePackageKey,
+ rtsPackageKey,
+ thPackageKey,
+ dphSeqPackageKey,
+ dphParPackageKey,
+ mainPackageKey,
+ thisGhcPackageKey,
+ interactivePackageKey, isInteractiveModule,
+ wiredInPackageKeys,
-- * The Module type
Module,
- modulePackageId, moduleName,
+ modulePackageKey, moduleName,
pprModule,
mkModule,
stableModuleCmp,
@@ -81,6 +83,7 @@ import UniqFM
import FastString
import Binary
import Util
+import {-# SOURCE #-} Packages
import Data.Data
import Data.Map (Map)
@@ -227,15 +230,15 @@ moduleNameColons = dots_to_colons . moduleNameString
%************************************************************************
\begin{code}
--- | A Module is a pair of a 'PackageId' and a 'ModuleName'.
+-- | A Module is a pair of a 'PackageKey' and a 'ModuleName'.
data Module = Module {
- modulePackageId :: !PackageId, -- pkg-1.0
+ modulePackageKey :: !PackageKey, -- pkg-1.0
moduleName :: !ModuleName -- A.B.C
}
deriving (Eq, Ord, Typeable)
instance Uniquable Module where
- getUnique (Module p n) = getUnique (packageIdFS p `appendFS` moduleNameFS n)
+ getUnique (Module p n) = getUnique (packageKeyFS p `appendFS` moduleNameFS n)
instance Outputable Module where
ppr = pprModule
@@ -255,25 +258,25 @@ instance Data Module where
-- not be stable from run to run of the compiler.
stableModuleCmp :: Module -> Module -> Ordering
stableModuleCmp (Module p1 n1) (Module p2 n2)
- = (p1 `stablePackageIdCmp` p2) `thenCmp`
+ = (p1 `stablePackageKeyCmp` p2) `thenCmp`
(n1 `stableModuleNameCmp` n2)
-mkModule :: PackageId -> ModuleName -> Module
+mkModule :: PackageKey -> ModuleName -> Module
mkModule = Module
pprModule :: Module -> SDoc
pprModule mod@(Module p n) =
pprPackagePrefix p mod <> pprModuleName n
-pprPackagePrefix :: PackageId -> Module -> SDoc
+pprPackagePrefix :: PackageKey -> Module -> SDoc
pprPackagePrefix p mod = getPprStyle doc
where
doc sty
| codeStyle sty =
- if p == mainPackageId
+ if p == mainPackageKey
then empty -- never qualify the main package in code
- else ztext (zEncodeFS (packageIdFS p)) <> char '_'
- | qualModule sty mod = ftext (packageIdFS (modulePackageId mod)) <> char ':'
+ else ztext (zEncodeFS (packageKeyFS p)) <> char '_'
+ | qualModule sty mod = ppr (modulePackageKey mod) <> char ':'
-- the PrintUnqualified tells us which modules have to
-- be qualified with package names
| otherwise = empty
@@ -287,51 +290,59 @@ class HasModule m where
%************************************************************************
%* *
-\subsection{PackageId}
+\subsection{PackageKey}
%* *
%************************************************************************
\begin{code}
--- | Essentially just a string identifying a package, including the version: e.g. parsec-1.0
-newtype PackageId = PId FastString deriving( Eq, Typeable )
+-- | A string which uniquely identifies a package. For wired-in packages,
+-- it is just the package name, but for user compiled packages, it is a hash.
+-- ToDo: when the key is a hash, we can do more clever things than store
+-- the hex representation and hash-cons those strings.
+newtype PackageKey = PId FastString deriving( Eq, Typeable )
-- here to avoid module loops with PackageConfig
-instance Uniquable PackageId where
- getUnique pid = getUnique (packageIdFS pid)
+instance Uniquable PackageKey where
+ getUnique pid = getUnique (packageKeyFS pid)
-- Note: *not* a stable lexicographic ordering, a faster unique-based
-- ordering.
-instance Ord PackageId where
+instance Ord PackageKey where
nm1 `compare` nm2 = getUnique nm1 `compare` getUnique nm2
-instance Data PackageId where
+instance Data PackageKey where
-- don't traverse?
- toConstr _ = abstractConstr "PackageId"
+ toConstr _ = abstractConstr "PackageKey"
gunfold _ _ = error "gunfold"
- dataTypeOf _ = mkNoRepType "PackageId"
+ dataTypeOf _ = mkNoRepType "PackageKey"
-stablePackageIdCmp :: PackageId -> PackageId -> Ordering
+stablePackageKeyCmp :: PackageKey -> PackageKey -> Ordering
-- ^ Compares package ids lexically, rather than by their 'Unique's
-stablePackageIdCmp p1 p2 = packageIdFS p1 `compare` packageIdFS p2
+stablePackageKeyCmp p1 p2 = packageKeyFS p1 `compare` packageKeyFS p2
-instance Outputable PackageId where
- ppr pid = text (packageIdString pid)
+instance Outputable PackageKey where
+ ppr pk = getPprStyle $ \sty -> sdocWithDynFlags $ \dflags ->
+ text (packageKeyPackageIdString dflags pk)
+ -- Don't bother qualifying if it's wired in!
+ <> (if qualPackage sty pk && not (pk `elem` wiredInPackageKeys)
+ then char '@' <> ftext (packageKeyFS pk)
+ else empty)
-instance Binary PackageId where
- put_ bh pid = put_ bh (packageIdFS pid)
- get bh = do { fs <- get bh; return (fsToPackageId fs) }
+instance Binary PackageKey where
+ put_ bh pid = put_ bh (packageKeyFS pid)
+ get bh = do { fs <- get bh; return (fsToPackageKey fs) }
-fsToPackageId :: FastString -> PackageId
-fsToPackageId = PId
+fsToPackageKey :: FastString -> PackageKey
+fsToPackageKey = PId
-packageIdFS :: PackageId -> FastString
-packageIdFS (PId fs) = fs
+packageKeyFS :: PackageKey -> FastString
+packageKeyFS (PId fs) = fs
-stringToPackageId :: String -> PackageId
-stringToPackageId = fsToPackageId . mkFastString
+stringToPackageKey :: String -> PackageKey
+stringToPackageKey = fsToPackageKey . mkFastString
-packageIdString :: PackageId -> String
-packageIdString = unpackFS . packageIdFS
+packageKeyString :: PackageKey -> String
+packageKeyString = unpackFS . packageKeyFS
-- -----------------------------------------------------------------------------
@@ -347,7 +358,7 @@ packageIdString = unpackFS . packageIdFS
-- versions of them installed. However, for each invocation of GHC,
-- only a single instance of each wired-in package will be recognised
-- (the desired one is selected via @-package@\/@-hide-package@), and GHC
--- will use the unversioned 'PackageId' below when referring to it,
+-- will use the unversioned 'PackageKey' below when referring to it,
-- including in .hi files and object file symbols. Unselected
-- versions of wired-in packages will be ignored, as will any other
-- package that depends directly or indirectly on it (much as if you
@@ -355,27 +366,37 @@ packageIdString = unpackFS . packageIdFS
-- Make sure you change 'Packages.findWiredInPackages' if you add an entry here
-integerPackageId, primPackageId,
- basePackageId, rtsPackageId,
- thPackageId, dphSeqPackageId, dphParPackageId,
- mainPackageId, thisGhcPackageId, interactivePackageId :: PackageId
-primPackageId = fsToPackageId (fsLit "ghc-prim")
-integerPackageId = fsToPackageId (fsLit cIntegerLibrary)
-basePackageId = fsToPackageId (fsLit "base")
-rtsPackageId = fsToPackageId (fsLit "rts")
-thPackageId = fsToPackageId (fsLit "template-haskell")
-dphSeqPackageId = fsToPackageId (fsLit "dph-seq")
-dphParPackageId = fsToPackageId (fsLit "dph-par")
-thisGhcPackageId = fsToPackageId (fsLit ("ghc-" ++ cProjectVersion))
-interactivePackageId = fsToPackageId (fsLit "interactive")
+integerPackageKey, primPackageKey,
+ basePackageKey, rtsPackageKey,
+ thPackageKey, dphSeqPackageKey, dphParPackageKey,
+ mainPackageKey, thisGhcPackageKey, interactivePackageKey :: PackageKey
+primPackageKey = fsToPackageKey (fsLit "ghc-prim")
+integerPackageKey = fsToPackageKey (fsLit cIntegerLibrary)
+basePackageKey = fsToPackageKey (fsLit "base")
+rtsPackageKey = fsToPackageKey (fsLit "rts")
+thPackageKey = fsToPackageKey (fsLit "template-haskell")
+dphSeqPackageKey = fsToPackageKey (fsLit "dph-seq")
+dphParPackageKey = fsToPackageKey (fsLit "dph-par")
+thisGhcPackageKey = fsToPackageKey (fsLit "ghc")
+interactivePackageKey = fsToPackageKey (fsLit "interactive")
-- | This is the package Id for the current program. It is the default
-- package Id if you don't specify a package name. We don't add this prefix
-- to symbol names, since there can be only one main package per program.
-mainPackageId = fsToPackageId (fsLit "main")
+mainPackageKey = fsToPackageKey (fsLit "main")
isInteractiveModule :: Module -> Bool
-isInteractiveModule mod = modulePackageId mod == interactivePackageId
+isInteractiveModule mod = modulePackageKey mod == interactivePackageKey
+
+wiredInPackageKeys :: [PackageKey]
+wiredInPackageKeys = [ primPackageKey,
+ integerPackageKey,
+ basePackageKey,
+ rtsPackageKey,
+ thPackageKey,
+ thisGhcPackageKey,
+ dphSeqPackageKey,
+ dphParPackageKey ]
\end{code}
%************************************************************************
diff --git a/compiler/basicTypes/Module.lhs-boot b/compiler/basicTypes/Module.lhs-boot
index 63839b55bc5e..6d194d6a2a0e 100644
--- a/compiler/basicTypes/Module.lhs-boot
+++ b/compiler/basicTypes/Module.lhs-boot
@@ -3,8 +3,8 @@ module Module where
data Module
data ModuleName
-data PackageId
+data PackageKey
moduleName :: Module -> ModuleName
-modulePackageId :: Module -> PackageId
-packageIdString :: PackageId -> String
+modulePackageKey :: Module -> PackageKey
+packageKeyString :: PackageKey -> String
\end{code}
diff --git a/compiler/basicTypes/Name.lhs b/compiler/basicTypes/Name.lhs
index e2742bb3a8ed..7651c7c74926 100644
--- a/compiler/basicTypes/Name.lhs
+++ b/compiler/basicTypes/Name.lhs
@@ -5,6 +5,8 @@
\section[Name]{@Name@: to transmit name info from renamer to typechecker}
\begin{code}
+{-# LANGUAGE DeriveDataTypeable #-}
+
-- |
-- #name_types#
-- GHC uses several kinds of name internally:
@@ -501,7 +503,7 @@ pprModulePrefix sty mod occ = sdocWithDynFlags $ \dflags ->
case qualName sty mod occ of -- See Outputable.QualifyName:
NameQual modname -> ppr modname <> dot -- Name is in scope
NameNotInScope1 -> ppr mod <> dot -- Not in scope
- NameNotInScope2 -> ppr (modulePackageId mod) <> colon -- Module not in
+ NameNotInScope2 -> ppr (modulePackageKey mod) <> colon -- Module not in
<> ppr (moduleName mod) <> dot -- scope either
_otherwise -> empty
diff --git a/compiler/basicTypes/NameEnv.lhs b/compiler/basicTypes/NameEnv.lhs
index 79433ca14451..f39627706db8 100644
--- a/compiler/basicTypes/NameEnv.lhs
+++ b/compiler/basicTypes/NameEnv.lhs
@@ -5,7 +5,8 @@
\section[NameEnv]{@NameEnv@: name environments}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -58,7 +59,7 @@ depAnal get_defs get_uses nodes
= stronglyConnCompFromEdgedVertices (map mk_node keyed_nodes)
where
keyed_nodes = nodes `zip` [(1::Int)..]
- mk_node (node, key) = (node, key, mapCatMaybes (lookupNameEnv key_map) (get_uses node))
+ mk_node (node, key) = (node, key, mapMaybe (lookupNameEnv key_map) (get_uses node))
key_map :: NameEnv Int -- Maps a Name to the key of the decl that defines it
key_map = mkNameEnv [(name,key) | (node, key) <- keyed_nodes, name <- get_defs node]
diff --git a/compiler/basicTypes/NameSet.lhs b/compiler/basicTypes/NameSet.lhs
index ed42c2b1aadf..9cd9fcef939f 100644
--- a/compiler/basicTypes/NameSet.lhs
+++ b/compiler/basicTypes/NameSet.lhs
@@ -4,7 +4,8 @@
%
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
diff --git a/compiler/basicTypes/OccName.lhs b/compiler/basicTypes/OccName.lhs
index 6dbae4bb61d4..d942362db790 100644
--- a/compiler/basicTypes/OccName.lhs
+++ b/compiler/basicTypes/OccName.lhs
@@ -4,6 +4,8 @@
%
\begin{code}
+{-# LANGUAGE DeriveDataTypeable #-}
+
-- |
-- #name_types#
-- GHC uses several kinds of name internally:
@@ -20,7 +22,7 @@
--
-- * 'Var.Var': see "Var#name_types"
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -30,6 +32,8 @@
module OccName (
-- * The 'NameSpace' type
NameSpace, -- Abstract
+
+ nameSpacesRelated,
-- ** Construction
-- $real_vs_source_data_constructors
@@ -51,7 +55,6 @@ module OccName (
mkTcOcc, mkTcOccFS,
mkClsOcc, mkClsOccFS,
mkDFunOcc,
- mkTupleOcc,
setOccNameSpace,
demoteOccName,
HasOccName(..),
@@ -62,9 +65,9 @@ module OccName (
mkGenDefMethodOcc,
mkDerivedTyConOcc, mkNewTyCoOcc, mkClassOpAuxOcc,
mkCon2TagOcc, mkTag2ConOcc, mkMaxTagOcc,
- mkClassDataConOcc, mkDictOcc, mkIPOcc,
- mkSpecOcc, mkForeignExportOcc, mkGenOcc1, mkGenOcc2,
- mkGenD, mkGenR, mkGen1R, mkGenRCo, mkGenC, mkGenS,
+ mkClassDataConOcc, mkDictOcc, mkIPOcc,
+ mkSpecOcc, mkForeignExportOcc, mkRepEqOcc, mkGenOcc1, mkGenOcc2,
+ mkGenD, mkGenR, mkGen1R, mkGenRCo, mkGenC, mkGenS,
mkDataTOcc, mkDataCOcc, mkDataConWorkerOcc,
mkSuperDictSelOcc, mkLocalOcc, mkMethodOcc, mkInstTyTcOcc,
mkInstTyCoOcc, mkEqPredCoOcc,
@@ -82,14 +85,12 @@ module OccName (
isTcClsNameSpace, isTvNameSpace, isDataConNameSpace, isVarNameSpace, isValNameSpace,
- isTupleOcc_maybe,
-
-- * The 'OccEnv' type
OccEnv, emptyOccEnv, unitOccEnv, extendOccEnv, mapOccEnv,
lookupOccEnv, mkOccEnv, mkOccEnv_C, extendOccEnvList, elemOccEnv,
occEnvElts, foldOccEnv, plusOccEnv, plusOccEnv_C, extendOccEnv_C,
extendOccEnv_Acc, filterOccEnv, delListFromOccEnv, delFromOccEnv,
- alterOccEnv,
+ alterOccEnv, pprOccEnv,
-- * The 'OccSet' type
OccSet, emptyOccSet, unitOccSet, mkOccSet, extendOccSet,
@@ -103,12 +104,14 @@ module OccName (
-- * Lexical characteristics of Haskell names
isLexCon, isLexVar, isLexId, isLexSym,
isLexConId, isLexConSym, isLexVarId, isLexVarSym,
- startsVarSym, startsVarId, startsConSym, startsConId
+ startsVarSym, startsVarId, startsConSym, startsConId,
+
+ -- FsEnv
+ FastStringEnv, emptyFsEnv, lookupFsEnv, extendFsEnv, mkFsEnv
) where
import Util
import Unique
-import BasicTypes
import DynFlags
import UniqFM
import UniqSet
@@ -119,6 +122,29 @@ import Data.Char
import Data.Data
\end{code}
+%************************************************************************
+%* *
+ FastStringEnv
+%* *
+%************************************************************************
+
+FastStringEnv can't be in FastString because the env depends on UniqFM
+
+\begin{code}
+type FastStringEnv a = UniqFM a -- Keyed by FastString
+
+
+emptyFsEnv :: FastStringEnv a
+lookupFsEnv :: FastStringEnv a -> FastString -> Maybe a
+extendFsEnv :: FastStringEnv a -> FastString -> a -> FastStringEnv a
+mkFsEnv :: [(FastString,a)] -> FastStringEnv a
+
+emptyFsEnv = emptyUFM
+lookupFsEnv = lookupUFM
+extendFsEnv = addToUFM
+mkFsEnv = listToUFM
+\end{code}
+
%************************************************************************
%* *
\subsection{Name space}
@@ -248,6 +274,9 @@ instance Data OccName where
toConstr _ = abstractConstr "OccName"
gunfold _ _ = error "gunfold"
dataTypeOf _ = mkNoRepType "OccName"
+
+instance HasOccName OccName where
+ occName = id
\end{code}
@@ -261,6 +290,11 @@ instance Data OccName where
instance Outputable OccName where
ppr = pprOccName
+instance OutputableBndr OccName where
+ pprBndr _ = ppr
+ pprInfixOcc n = pprInfixVar (isSymOcc n) (ppr n)
+ pprPrefixOcc n = pprPrefixVar (isSymOcc n) (ppr n)
+
pprOccName :: OccName -> SDoc
pprOccName (OccName sp occ)
= getPprStyle $ \ sty ->
@@ -338,7 +372,20 @@ demoteOccName (OccName space name) = do
space' <- demoteNameSpace space
return $ OccName space' name
-{- | Other names in the compiler add aditional information to an OccName.
+-- Name spaces are related if there is a chance to mean the one when one writes
+-- the other, i.e. variables <-> data constructors and type variables <-> type constructors
+nameSpacesRelated :: NameSpace -> NameSpace -> Bool
+nameSpacesRelated ns1 ns2 = ns1 == ns2 || otherNameSpace ns1 == ns2
+
+otherNameSpace :: NameSpace -> NameSpace
+otherNameSpace VarName = DataName
+otherNameSpace DataName = VarName
+otherNameSpace TvName = TcClsName
+otherNameSpace TcClsName = TvName
+
+
+
+{- | Other names in the compiler add additional information to an OccName.
This class provides a consistent way to access the underlying OccName. -}
class HasOccName name where
occName :: name -> OccName
@@ -415,7 +462,10 @@ filterOccEnv x (A y) = A $ filterUFM x y
alterOccEnv fn (A y) k = A $ alterUFM fn y k
instance Outputable a => Outputable (OccEnv a) where
- ppr (A x) = ppr x
+ ppr x = pprOccEnv ppr x
+
+pprOccEnv :: (a -> SDoc) -> OccEnv a -> SDoc
+pprOccEnv ppr_elt (A env) = pprUniqFM ppr_elt env
type OccSet = UniqSet OccName
@@ -496,7 +546,7 @@ isDataSymOcc _ = False
-- it is a data constructor or variable or whatever)
isSymOcc :: OccName -> Bool
isSymOcc (OccName DataName s) = isLexConSym s
-isSymOcc (OccName TcClsName s) = isLexConSym s || isLexVarSym s
+isSymOcc (OccName TcClsName s) = isLexSym s
isSymOcc (OccName VarName s) = isLexSym s
isSymOcc (OccName TvName s) = isLexSym s
-- Pretty inefficient!
@@ -572,7 +622,7 @@ isDerivedOccName occ =
\begin{code}
mkDataConWrapperOcc, mkWorkerOcc, mkMatcherOcc, mkDefaultMethodOcc,
mkGenDefMethodOcc, mkDerivedTyConOcc, mkClassDataConOcc, mkDictOcc,
- mkIPOcc, mkSpecOcc, mkForeignExportOcc, mkGenOcc1, mkGenOcc2,
+ mkIPOcc, mkSpecOcc, mkForeignExportOcc, mkRepEqOcc, mkGenOcc1, mkGenOcc2,
mkGenD, mkGenR, mkGen1R, mkGenRCo,
mkDataTOcc, mkDataCOcc, mkDataConWorkerOcc, mkNewTyCoOcc,
mkInstTyCoOcc, mkEqPredCoOcc, mkClassOpAuxOcc,
@@ -593,6 +643,7 @@ mkDictOcc = mk_simple_deriv varName "$d"
mkIPOcc = mk_simple_deriv varName "$i"
mkSpecOcc = mk_simple_deriv varName "$s"
mkForeignExportOcc = mk_simple_deriv varName "$f"
+mkRepEqOcc = mk_simple_deriv tvName "$r" -- In RULES involving Coercible
mkNewTyCoOcc = mk_simple_deriv tcName "NTCo:" -- Coercion for newtypes
mkInstTyCoOcc = mk_simple_deriv tcName "TFCo:" -- Coercion for type functions
mkEqPredCoOcc = mk_simple_deriv tcName "$co"
@@ -805,55 +856,6 @@ tidyOccName env occ@(OccName occ_sp fs)
new_fs = mkFastString (base ++ show n)
\end{code}
-%************************************************************************
-%* *
- Stuff for dealing with tuples
-%* *
-%************************************************************************
-
-\begin{code}
-mkTupleOcc :: NameSpace -> TupleSort -> Arity -> OccName
-mkTupleOcc ns sort ar = OccName ns (mkFastString str)
- where
- -- no need to cache these, the caching is done in the caller
- -- (TysWiredIn.mk_tuple)
- str = case sort of
- UnboxedTuple -> '(' : '#' : commas ++ "#)"
- BoxedTuple -> '(' : commas ++ ")"
- ConstraintTuple -> '(' : commas ++ ")"
- -- Cute hack: reuse the standard tuple OccNames (and hence code)
- -- for fact tuples, but give them different Uniques so they are not equal.
- --
- -- You might think that this will go wrong because isTupleOcc_maybe won't
- -- be able to tell the difference between boxed tuples and fact tuples. BUT:
- -- 1. Fact tuples never occur directly in user code, so it doesn't matter
- -- that we can't detect them in Orig OccNames originating from the user
- -- programs (or those built by setRdrNameSpace used on an Exact tuple Name)
- -- 2. Interface files have a special representation for tuple *occurrences*
- -- in IfaceTyCons, their workers (in IfaceSyn) and their DataCons (in case
- -- alternatives). Thus we don't rely on the OccName to figure out what kind
- -- of tuple an occurrence was trying to use in these situations.
- -- 3. We *don't* represent tuple data type declarations specially, so those
- -- are still turned into wired-in names via isTupleOcc_maybe. But that's OK
- -- because we don't actually need to declare fact tuples thanks to this hack.
- --
- -- So basically any OccName like (,,) flowing to isTupleOcc_maybe will always
- -- refer to the standard boxed tuple. Cool :-)
-
- commas = take (ar-1) (repeat ',')
-
-isTupleOcc_maybe :: OccName -> Maybe (NameSpace, TupleSort, Arity)
--- Tuples are special, because there are so many of them!
-isTupleOcc_maybe (OccName ns fs)
- = case unpackFS fs of
- '(':'#':',':rest -> Just (ns, UnboxedTuple, 2 + count_commas rest)
- '(':',':rest -> Just (ns, BoxedTuple, 2 + count_commas rest)
- _other -> Nothing
- where
- count_commas (',':rest) = 1 + count_commas rest
- count_commas _ = 0
-\end{code}
-
%************************************************************************
%* *
\subsection{Lexical categories}
@@ -863,6 +865,15 @@ isTupleOcc_maybe (OccName ns fs)
These functions test strings to see if they fit the lexical categories
defined in the Haskell report.
+Note [Classification of generated names]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some names generated for internal use can show up in debugging output,
+e.g. when using -ddump-simpl. These generated names start with a $
+but should still be pretty-printed using prefix notation. We make sure
+this is the case in isLexVarSym by only classifying a name as a symbol
+if all its characters are symbols, not just its first one.
+
\begin{code}
isLexCon, isLexVar, isLexId, isLexSym :: FastString -> Bool
isLexConId, isLexConSym, isLexVarId, isLexVarSym :: FastString -> Bool
@@ -889,19 +900,26 @@ isLexConSym cs -- Infix type or data constructors
| cs == (fsLit "->") = True
| otherwise = startsConSym (headFS cs)
-isLexVarSym cs -- Infix identifiers
- | nullFS cs = False -- e.g. "+"
- | otherwise = startsVarSym (headFS cs)
+isLexVarSym fs -- Infix identifiers e.g. "+"
+ | fs == (fsLit "~R#") = True
+ | otherwise
+ = case (if nullFS fs then [] else unpackFS fs) of
+ [] -> False
+ (c:cs) -> startsVarSym c && all isVarSymChar cs
+ -- See Note [Classification of generated names]
-------------
startsVarSym, startsVarId, startsConSym, startsConId :: Char -> Bool
-startsVarSym c = isSymbolASCII c || (ord c > 0x7f && isSymbol c) -- Infix Ids
-startsConSym c = c == ':' -- Infix data constructors
+startsVarSym c = isSymbolASCII c || (ord c > 0x7f && isSymbol c) -- Infix Ids
+startsConSym c = c == ':' -- Infix data constructors
startsVarId c = isLower c || c == '_' -- Ordinary Ids
startsConId c = isUpper c || c == '(' -- Ordinary type constructors and data constructors
isSymbolASCII :: Char -> Bool
isSymbolASCII c = c `elem` "!#$%&*+./<=>?@\\^|~-"
+
+isVarSymChar :: Char -> Bool
+isVarSymChar c = c == ':' || startsVarSym c
\end{code}
%************************************************************************
diff --git a/compiler/basicTypes/PatSyn.lhs b/compiler/basicTypes/PatSyn.lhs
index 9285b3c365a7..cba842729276 100644
--- a/compiler/basicTypes/PatSyn.lhs
+++ b/compiler/basicTypes/PatSyn.lhs
@@ -5,21 +5,25 @@
\section[PatSyn]{@PatSyn@: Pattern synonyms}
\begin{code}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
module PatSyn (
-- * Main data types
PatSyn, mkPatSyn,
-- ** Type deconstruction
- patSynId, patSynType, patSynArity, patSynIsInfix,
- patSynArgs, patSynArgTys, patSynTyDetails,
+ patSynName, patSynArity, patSynIsInfix,
+ patSynArgs, patSynTyDetails, patSynType,
patSynWrapper, patSynMatcher,
- patSynExTyVars, patSynSig, patSynInstArgTys
+ patSynExTyVars, patSynSig,
+ patSynInstArgTys, patSynInstResTy,
+ tidyPatSynIds, patSynIds
) where
#include "HsVersions.h"
import Type
+import TcType( mkSigmaTy )
import Name
import Outputable
import Unique
@@ -27,8 +31,6 @@ import Util
import BasicTypes
import FastString
import Var
-import Id
-import TcType
import HsBinds( HsPatSynDetails(..) )
import qualified Data.Data as Data
@@ -37,8 +39,8 @@ import Data.Function
\end{code}
-Pattern synonym representation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Note [Pattern synonym representation]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider the following pattern synonym declaration
pattern P x = MkT [x] (Just 42)
@@ -58,15 +60,49 @@ with the following typeclass constraints:
In this case, the fields of MkPatSyn will be set as follows:
- psArgs = [x :: b]
+ psArgs = [b]
psArity = 1
psInfix = False
psUnivTyVars = [t]
psExTyVars = [b]
- psTheta = ((Show (Maybe t), Ord b), (Eq t, Num t))
+ psProvTheta = (Show (Maybe t), Ord b)
+ psReqTheta = (Eq t, Num t)
psOrigResTy = T (Maybe t)
+Note [Matchers and wrappers for pattern synonyms]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For each pattern synonym, we generate a single matcher function which
+implements the actual matching. For the above example, the matcher
+will have type:
+
+ $mP :: forall r t. (Eq t, Num t)
+ => T (Maybe t)
+ -> (forall b. (Show (Maybe t), Ord b) => b -> r)
+ -> r
+ -> r
+
+with the following implementation:
+
+ $mP @r @t $dEq $dNum scrut cont fail = case scrut of
+ MkT @b $dShow $dOrd [x] (Just 42) -> cont @b $dShow $dOrd x
+ _ -> fail
+
+For *bidirectional* pattern synonyms, we also generate a single wrapper
+function which implements the pattern synonym in an expression
+context. For our running example, it will be:
+
+ $WP :: forall t b. (Show (Maybe t), Ord b, Eq t, Num t)
+ => b -> T (Maybe t)
+ $WP x = MkT [x] (Just 42)
+
+NB: the existential/universal and required/provided split does not
+apply to the wrapper since you are only putting stuff in, not getting
+stuff out.
+
+Injectivity of bidirectional pattern synonyms is checked in
+tcPatToExpr which walks the pattern and returns its corresponding
+expression when available.
%************************************************************************
%* *
@@ -76,21 +112,36 @@ In this case, the fields of MkPatSyn will be set as follows:
\begin{code}
-- | A pattern synonym
+-- See Note [Pattern synonym representation]
data PatSyn
= MkPatSyn {
- psId :: Id,
- psUnique :: Unique, -- Cached from Name
- psMatcher :: Id,
- psWrapper :: Maybe Id,
+ psName :: Name,
+ psUnique :: Unique, -- Cached from Name
- psArgs :: [Var],
- psArity :: Arity, -- == length psArgs
- psInfix :: Bool, -- True <=> declared infix
+ psArgs :: [Type],
+ psArity :: Arity, -- == length psArgs
+ psInfix :: Bool, -- True <=> declared infix
- psUnivTyVars :: [TyVar], -- Universially-quantified type variables
- psExTyVars :: [TyVar], -- Existentially-quantified type vars
- psTheta :: (ThetaType, ThetaType), -- Provided and required dictionaries
- psOrigResTy :: Type
+ psUnivTyVars :: [TyVar], -- Universally-quantified type variables
+ psExTyVars :: [TyVar], -- Existentially-quantified type vars
+ psProvTheta :: ThetaType, -- Provided dictionaries
+ psReqTheta :: ThetaType, -- Required dictionaries
+ psOrigResTy :: Type, -- Mentions only psUnivTyVars
+
+ -- See Note [Matchers and wrappers for pattern synonyms]
+ psMatcher :: Id,
+ -- Matcher function, of type
+ -- forall r univ_tvs. req_theta
+ -- => res_ty
+ -- -> (forall ex_tvs. prov_theta -> arg_tys -> r)
+ -- -> r -> r
+
+ psWrapper :: Maybe Id
+ -- Nothing => uni-directional pattern synonym
+ -- Just wid => bi-directional
+ -- Wrapper function, of type
+ -- forall univ_tvs, ex_tvs. (prov_theta, req_theta)
+ -- => arg_tys -> res_ty
}
deriving Data.Typeable.Typeable
\end{code}
@@ -117,7 +168,7 @@ instance Uniquable PatSyn where
getUnique = psUnique
instance NamedThing PatSyn where
- getName = getName . psId
+ getName = patSynName
instance Outputable PatSyn where
ppr = ppr . getName
@@ -144,7 +195,7 @@ instance Data.Data PatSyn where
-- | Build a new pattern synonym
mkPatSyn :: Name
-> Bool -- ^ Is the pattern synonym declared infix?
- -> [Var] -- ^ Original arguments
+ -> [Type] -- ^ Original arguments
+ -> [TyVar] -- ^ Universally-quantified type variables
-> [TyVar] -- ^ Existentially-quantified type variables
-> ThetaType -- ^ Wanted dicts
@@ -158,29 +209,30 @@ mkPatSyn name declared_infix orig_args
prov_theta req_theta
orig_res_ty
matcher wrapper
- = MkPatSyn {psId = id, psUnique = getUnique name,
+ = MkPatSyn {psName = name, psUnique = getUnique name,
psUnivTyVars = univ_tvs, psExTyVars = ex_tvs,
- psTheta = (prov_theta, req_theta),
+ psProvTheta = prov_theta, psReqTheta = req_theta,
psInfix = declared_infix,
psArgs = orig_args,
psArity = length orig_args,
psOrigResTy = orig_res_ty,
psMatcher = matcher,
psWrapper = wrapper }
- where
- pat_ty = mkSigmaTy univ_tvs req_theta $
- mkSigmaTy ex_tvs prov_theta $
- mkFunTys (map varType orig_args) orig_res_ty
- id = mkLocalId name pat_ty
\end{code}
\begin{code}
-- | The 'Name' of the 'PatSyn', giving it a unique, rooted identification
-patSynId :: PatSyn -> Id
-patSynId = psId
+patSynName :: PatSyn -> Name
+patSynName = psName
patSynType :: PatSyn -> Type
-patSynType = psOrigResTy
+-- The full pattern type, used only in error messages
+patSynType (MkPatSyn { psUnivTyVars = univ_tvs, psReqTheta = req_theta
+ , psExTyVars = ex_tvs, psProvTheta = prov_theta
+ , psArgs = orig_args, psOrigResTy = orig_res_ty })
+ = mkSigmaTy univ_tvs req_theta $
+ mkSigmaTy ex_tvs prov_theta $
+ mkFunTys orig_args orig_res_ty
-- | Should the 'PatSyn' be presented infix?
patSynIsInfix :: PatSyn -> Bool
@@ -190,22 +242,24 @@ patSynIsInfix = psInfix
patSynArity :: PatSyn -> Arity
patSynArity = psArity
-patSynArgs :: PatSyn -> [Var]
+patSynArgs :: PatSyn -> [Type]
patSynArgs = psArgs
-patSynArgTys :: PatSyn -> [Type]
-patSynArgTys = map varType . patSynArgs
-
patSynTyDetails :: PatSyn -> HsPatSynDetails Type
-patSynTyDetails ps = case (patSynIsInfix ps, patSynArgTys ps) of
- (True, [left, right]) -> InfixPatSyn left right
- (_, tys) -> PrefixPatSyn tys
+patSynTyDetails (MkPatSyn { psInfix = is_infix, psArgs = arg_tys })
+ | is_infix, [left,right] <- arg_tys
+ = InfixPatSyn left right
+ | otherwise
+ = PrefixPatSyn arg_tys
patSynExTyVars :: PatSyn -> [TyVar]
patSynExTyVars = psExTyVars
-patSynSig :: PatSyn -> ([TyVar], [TyVar], (ThetaType, ThetaType))
-patSynSig ps = (psUnivTyVars ps, psExTyVars ps, psTheta ps)
+patSynSig :: PatSyn -> ([TyVar], [TyVar], ThetaType, ThetaType, [Type], Type)
+patSynSig (MkPatSyn { psUnivTyVars = univ_tvs, psExTyVars = ex_tvs
+ , psProvTheta = prov, psReqTheta = req
+ , psArgs = arg_tys, psOrigResTy = res_ty })
+ = (univ_tvs, ex_tvs, prov, req, arg_tys, res_ty)
patSynWrapper :: PatSyn -> Maybe Id
patSynWrapper = psWrapper
@@ -213,13 +267,43 @@ patSynWrapper = psWrapper
patSynMatcher :: PatSyn -> Id
patSynMatcher = psMatcher
+patSynIds :: PatSyn -> [Id]
+patSynIds (MkPatSyn { psMatcher = match_id, psWrapper = mb_wrap_id })
+ = case mb_wrap_id of
+ Nothing -> [match_id]
+ Just wrap_id -> [match_id, wrap_id]
+
+tidyPatSynIds :: (Id -> Id) -> PatSyn -> PatSyn
+tidyPatSynIds tidy_fn ps@(MkPatSyn { psMatcher = match_id, psWrapper = mb_wrap_id })
+ = ps { psMatcher = tidy_fn match_id, psWrapper = fmap tidy_fn mb_wrap_id }
+
patSynInstArgTys :: PatSyn -> [Type] -> [Type]
-patSynInstArgTys ps inst_tys
+-- Return the types of the argument patterns
+-- e.g. data D a = forall b. MkD a b (b->a)
+-- pattern P f x y = MkD (x,True) y f
+-- D :: forall a. forall b. a -> b -> (b->a) -> D a
+-- P :: forall c. forall b. (b->(c,Bool)) -> c -> b -> P c
+-- patSynInstArgTys P [Int,bb] = [bb->(Int,Bool), Int, bb]
+-- NB: the inst_tys should be both universal and existential
+patSynInstArgTys (MkPatSyn { psName = name, psUnivTyVars = univ_tvs
+ , psExTyVars = ex_tvs, psArgs = arg_tys })
+ inst_tys
= ASSERT2( length tyvars == length inst_tys
- , ptext (sLit "patSynInstArgTys") <+> ppr ps $$ ppr tyvars $$ ppr inst_tys )
+ , ptext (sLit "patSynInstArgTys") <+> ppr name $$ ppr tyvars $$ ppr inst_tys )
map (substTyWith tyvars inst_tys) arg_tys
where
- (univ_tvs, ex_tvs, _) = patSynSig ps
- arg_tys = map varType (psArgs ps)
tyvars = univ_tvs ++ ex_tvs
+
+patSynInstResTy :: PatSyn -> [Type] -> Type
+-- Return the type of whole pattern
+-- E.g. pattern P x y = Just (x,x,y)
+-- P :: a -> b -> Maybe (a,a,b)
+-- patSynInstResTy P [Int,Bool] = Maybe (Int,Int,Bool)
+-- NB: unlike patSynInstArgTys, the inst_tys should be just the *universal* tyvars
+patSynInstResTy (MkPatSyn { psName = name, psUnivTyVars = univ_tvs
+ , psOrigResTy = res_ty })
+ inst_tys
+ = ASSERT2( length univ_tvs == length inst_tys
+ , ptext (sLit "patSynInstResTy") <+> ppr name $$ ppr univ_tvs $$ ppr inst_tys )
+ substTyWith univ_tvs inst_tys res_ty
\end{code}
diff --git a/compiler/basicTypes/RdrName.lhs b/compiler/basicTypes/RdrName.lhs
index 4ffeae0d7736..d4afaf10fc04 100644
--- a/compiler/basicTypes/RdrName.lhs
+++ b/compiler/basicTypes/RdrName.lhs
@@ -4,7 +4,7 @@
%
\begin{code}
-{-# LANGUAGE DeriveDataTypeable #-}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
-- |
-- #name_types#
@@ -331,49 +331,71 @@ instance Ord RdrName where
-- It is keyed by OccName, because we never use it for qualified names
-- We keep the current mapping, *and* the set of all Names in scope
-- Reason: see Note [Splicing Exact Names] in RnEnv
-type LocalRdrEnv = (OccEnv Name, NameSet)
+data LocalRdrEnv = LRE { lre_env :: OccEnv Name
+ , lre_in_scope :: NameSet }
+
+instance Outputable LocalRdrEnv where
+ ppr (LRE {lre_env = env, lre_in_scope = ns})
+ = hang (ptext (sLit "LocalRdrEnv {"))
+ 2 (vcat [ ptext (sLit "env =") <+> pprOccEnv ppr_elt env
+ , ptext (sLit "in_scope =") <+> braces (pprWithCommas ppr (nameSetToList ns))
+ ] <+> char '}')
+ where
+ ppr_elt name = parens (ppr (getUnique (nameOccName name))) <+> ppr name
+ -- So we can see if the keys line up correctly
emptyLocalRdrEnv :: LocalRdrEnv
-emptyLocalRdrEnv = (emptyOccEnv, emptyNameSet)
+emptyLocalRdrEnv = LRE { lre_env = emptyOccEnv, lre_in_scope = emptyNameSet }
extendLocalRdrEnv :: LocalRdrEnv -> Name -> LocalRdrEnv
-- The Name should be a non-top-level thing
-extendLocalRdrEnv (env, ns) name
+extendLocalRdrEnv (LRE { lre_env = env, lre_in_scope = ns }) name
= WARN( isExternalName name, ppr name )
- ( extendOccEnv env (nameOccName name) name
- , addOneToNameSet ns name
- )
+ LRE { lre_env = extendOccEnv env (nameOccName name) name
+ , lre_in_scope = addOneToNameSet ns name }
extendLocalRdrEnvList :: LocalRdrEnv -> [Name] -> LocalRdrEnv
-extendLocalRdrEnvList (env, ns) names
+extendLocalRdrEnvList (LRE { lre_env = env, lre_in_scope = ns }) names
= WARN( any isExternalName names, ppr names )
- ( extendOccEnvList env [(nameOccName n, n) | n <- names]
- , addListToNameSet ns names
- )
+ LRE { lre_env = extendOccEnvList env [(nameOccName n, n) | n <- names]
+ , lre_in_scope = addListToNameSet ns names }
lookupLocalRdrEnv :: LocalRdrEnv -> RdrName -> Maybe Name
-lookupLocalRdrEnv (env, _) (Unqual occ) = lookupOccEnv env occ
-lookupLocalRdrEnv _ _ = Nothing
+lookupLocalRdrEnv (LRE { lre_env = env }) (Unqual occ) = lookupOccEnv env occ
+lookupLocalRdrEnv _ _ = Nothing
lookupLocalRdrOcc :: LocalRdrEnv -> OccName -> Maybe Name
-lookupLocalRdrOcc (env, _) occ = lookupOccEnv env occ
+lookupLocalRdrOcc (LRE { lre_env = env }) occ = lookupOccEnv env occ
elemLocalRdrEnv :: RdrName -> LocalRdrEnv -> Bool
-elemLocalRdrEnv rdr_name (env, _)
- | isUnqual rdr_name = rdrNameOcc rdr_name `elemOccEnv` env
- | otherwise = False
+elemLocalRdrEnv rdr_name (LRE { lre_env = env, lre_in_scope = ns })
+ = case rdr_name of
+ Unqual occ -> occ `elemOccEnv` env
+ Exact name -> name `elemNameSet` ns -- See Note [Local bindings with Exact Names]
+ Qual {} -> False
+ Orig {} -> False
localRdrEnvElts :: LocalRdrEnv -> [Name]
-localRdrEnvElts (env, _) = occEnvElts env
+localRdrEnvElts (LRE { lre_env = env }) = occEnvElts env
inLocalRdrEnvScope :: Name -> LocalRdrEnv -> Bool
-- This is the point of the NameSet
-inLocalRdrEnvScope name (_, ns) = name `elemNameSet` ns
+inLocalRdrEnvScope name (LRE { lre_in_scope = ns }) = name `elemNameSet` ns
delLocalRdrEnvList :: LocalRdrEnv -> [OccName] -> LocalRdrEnv
-delLocalRdrEnvList (env, ns) occs = (delListFromOccEnv env occs, ns)
+delLocalRdrEnvList (LRE { lre_env = env, lre_in_scope = ns }) occs
+ = LRE { lre_env = delListFromOccEnv env occs
+ , lre_in_scope = ns }
\end{code}
+Note [Local bindings with Exact Names]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+With Template Haskell we can make local bindings that have Exact Names.
+Computing shadowing etc may use elemLocalRdrEnv (at least it certainly
+does so in RnTpes.bindHsTyVars), so for an Exact Name we must consult
+the in-scope-name-set.
+
+
%************************************************************************
%* *
GlobalRdrEnv
@@ -586,7 +608,7 @@ pickGREs rdr_name gres
= ASSERT2( isSrcRdrName rdr_name, ppr rdr_name )
candidates
where
- candidates = mapCatMaybes pick gres
+ candidates = mapMaybe pick gres
internal_candidates = filter (isInternalName . gre_name) candidates
rdr_is_unqual = isUnqual rdr_name
@@ -700,7 +722,7 @@ shadow_name env name
= alterOccEnv (fmap alter_fn) env (nameOccName name)
where
alter_fn :: [GlobalRdrElt] -> [GlobalRdrElt]
- alter_fn gres = mapCatMaybes (shadow_with name) gres
+ alter_fn gres = mapMaybe (shadow_with name) gres
shadow_with :: Name -> GlobalRdrElt -> Maybe GlobalRdrElt
shadow_with new_name old_gre@(GRE { gre_name = old_name, gre_prov = LocalDef })
@@ -719,7 +741,7 @@ shadow_name env name
| null imp_specs' = Nothing
| otherwise = Just (old_gre { gre_prov = Imported imp_specs' })
where
- imp_specs' = mapCatMaybes (shadow_is new_name) imp_specs
+ imp_specs' = mapMaybe (shadow_is new_name) imp_specs
shadow_is :: Name -> ImportSpec -> Maybe ImportSpec
shadow_is new_name is@(ImpSpec { is_decl = id_spec })
@@ -795,7 +817,7 @@ data ImpDeclSpec
-- the defining module for this thing!
-- TODO: either should be Module, or there
- -- should be a Maybe PackageId here too.
+ -- should be a Maybe PackageKey here too.
is_as :: ModuleName, -- ^ Import alias, e.g. from @as M@ (or @Muggle@ if there is no @as@ clause)
is_qual :: Bool, -- ^ Was this import qualified?
is_dloc :: SrcSpan -- ^ The location of the entire import declaration
diff --git a/compiler/basicTypes/SrcLoc.lhs b/compiler/basicTypes/SrcLoc.lhs
index d53ac2b0eacc..006bce9a31e1 100644
--- a/compiler/basicTypes/SrcLoc.lhs
+++ b/compiler/basicTypes/SrcLoc.lhs
@@ -3,6 +3,7 @@
%
\begin{code}
+{-# LANGUAGE DeriveDataTypeable #-}
{-# OPTIONS_GHC -fno-omit-interface-pragmas #-}
-- Workaround for Trac #5252 crashes the bootstrap compiler without -O
-- When the earliest compiler we want to boostrap with is
@@ -45,7 +46,7 @@ module SrcLoc (
srcSpanStart, srcSpanEnd,
realSrcSpanStart, realSrcSpanEnd,
srcSpanFileName_maybe,
- showUserSpan,
+ showUserSpan, showUserRealSpan,
-- ** Unsafely deconstructing SrcSpan
-- These are dubious exports, because they crash on some inputs
@@ -55,6 +56,8 @@ module SrcLoc (
-- ** Predicates on SrcSpan
isGoodSrcSpan, isOneLineSpan,
+ containsSpan,
+ isDumpSrcSpan,
-- * Located
Located,
@@ -263,8 +266,8 @@ data SrcSpan =
| UnhelpfulSpan !FastString -- Just a general indication
-- also used to indicate an empty span
- deriving (Eq, Typeable, Show) -- Show is used by Lexer.x, because we
- -- derive Show for Token
+ deriving (Eq, Ord, Typeable, Show) -- Show is used by Lexer.x, because we
+ -- derive Show for Token
-- | Built-in "bad" 'SrcSpan's for common sources of location uncertainty
noSrcSpan, wiredInSrcSpan :: SrcSpan
@@ -347,6 +350,20 @@ isOneLineSpan :: SrcSpan -> Bool
isOneLineSpan (RealSrcSpan s) = srcSpanStartLine s == srcSpanEndLine s
isOneLineSpan (UnhelpfulSpan _) = False
+-- | Tests whether the first span "contains" the other span, meaning
+-- that it covers at least as much source code. True if the spans are equal.
+containsSpan :: RealSrcSpan -> RealSrcSpan -> Bool
+containsSpan s1 s2
+ = srcSpanFile s1 == srcSpanFile s2
+ && (srcSpanStartLine s1, srcSpanStartCol s1)
+ <= (srcSpanStartLine s2, srcSpanStartCol s2)
+ && (srcSpanEndLine s1, srcSpanEndCol s1)
+ >= (srcSpanEndLine s2, srcSpanEndCol s2)
+
+-- | Tests whether the source span refers to a generated dump file
+isDumpSrcSpan :: RealSrcSpan -> Bool
+isDumpSrcSpan = isPrefixOf ".dump" . takeExtension . unpackFS . srcSpanFile
+
\end{code}
%************************************************************************
@@ -423,12 +440,11 @@ srcSpanFileName_maybe (UnhelpfulSpan _) = Nothing
\begin{code}
--- We want to order SrcSpans first by the start point, then by the end point.
-instance Ord SrcSpan where
+-- We want to order RealSrcSpans first by the start point, then by the end point.
+instance Ord RealSrcSpan where
a `compare` b =
- (srcSpanStart a `compare` srcSpanStart b) `thenCmp`
- (srcSpanEnd a `compare` srcSpanEnd b)
-
+ (realSrcSpanStart a `compare` realSrcSpanStart b) `thenCmp`
+ (realSrcSpanEnd a `compare` realSrcSpanEnd b)
instance Outputable RealSrcSpan where
ppr span
diff --git a/compiler/basicTypes/UniqSupply.lhs b/compiler/basicTypes/UniqSupply.lhs
index fea1489efbd7..6ceee2079398 100644
--- a/compiler/basicTypes/UniqSupply.lhs
+++ b/compiler/basicTypes/UniqSupply.lhs
@@ -4,6 +4,8 @@
%
\begin{code}
+{-# LANGUAGE UnboxedTuples #-}
+
module UniqSupply (
-- * Main data type
UniqSupply, -- Abstractly
diff --git a/compiler/basicTypes/Unique.lhs b/compiler/basicTypes/Unique.lhs
index 037aed06419c..897b093e3923 100644
--- a/compiler/basicTypes/Unique.lhs
+++ b/compiler/basicTypes/Unique.lhs
@@ -16,7 +16,7 @@ Some of the other hair in this code is to be able to use a
Haskell).
\begin{code}
-{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE CPP, BangPatterns, MagicHash #-}
module Unique (
-- * Main data types
diff --git a/compiler/basicTypes/Var.lhs b/compiler/basicTypes/Var.lhs
index 70c5d4491a6a..1f20d4adeccc 100644
--- a/compiler/basicTypes/Var.lhs
+++ b/compiler/basicTypes/Var.lhs
@@ -5,7 +5,8 @@
\section{@Vars@: Variables}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
diff --git a/compiler/basicTypes/VarSet.lhs b/compiler/basicTypes/VarSet.lhs
index b756283b91eb..8b7f755dcd4b 100644
--- a/compiler/basicTypes/VarSet.lhs
+++ b/compiler/basicTypes/VarSet.lhs
@@ -4,7 +4,8 @@
%
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
diff --git a/compiler/cmm/Bitmap.hs b/compiler/cmm/Bitmap.hs
index 54db1a9a673b..e7aa07206359 100644
--- a/compiler/cmm/Bitmap.hs
+++ b/compiler/cmm/Bitmap.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
--
-- (c) The University of Glasgow 2003-2006
--
diff --git a/compiler/cmm/BlockId.hs b/compiler/cmm/BlockId.hs
index 8a46aed8f0f6..e4cc0bccb741 100644
--- a/compiler/cmm/BlockId.hs
+++ b/compiler/cmm/BlockId.hs
@@ -1,5 +1,7 @@
-{- BlockId module should probably go away completely, being superseded by Label -}
+{-# LANGUAGE TypeSynonymInstances #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
+
+{- BlockId module should probably go away completely, being superseded by Label -}
module BlockId
( BlockId, mkBlockId -- ToDo: BlockId should be abstract, but it isn't yet
, BlockSet, BlockEnv
diff --git a/compiler/cmm/CLabel.hs b/compiler/cmm/CLabel.hs
index 1b86f3d6b41c..8ce82d25583b 100644
--- a/compiler/cmm/CLabel.hs
+++ b/compiler/cmm/CLabel.hs
@@ -44,6 +44,8 @@ module CLabel (
mkStringLitLabel,
mkAsmTempLabel,
+ mkAsmTempDerivedLabel,
+ mkAsmTempEndLabel,
mkPlainModuleInitLabel,
@@ -54,8 +56,13 @@ module CLabel (
mkIndStaticInfoLabel,
mkMainCapabilityLabel,
mkMAP_FROZEN_infoLabel,
+ mkMAP_FROZEN0_infoLabel,
mkMAP_DIRTY_infoLabel,
+ mkSMAP_FROZEN_infoLabel,
+ mkSMAP_FROZEN0_infoLabel,
+ mkSMAP_DIRTY_infoLabel,
mkEMPTY_MVAR_infoLabel,
+ mkArrWords_infoLabel,
mkTopTickyCtrLabel,
mkCAFBlackHoleInfoTableLabel,
@@ -94,7 +101,7 @@ module CLabel (
mkHpcTicksLabel,
hasCAF,
- needsCDecl, isAsmTemp, maybeAsmTemp, externallyVisibleCLabel,
+ needsCDecl, maybeAsmTemp, externallyVisibleCLabel,
isMathFun,
isCFunctionLabel, isGcPtrLabel, labelDynamic,
@@ -118,6 +125,7 @@ import FastString
import DynFlags
import Platform
import UniqSet
+import PprCore ( {- instances -} )
-- -----------------------------------------------------------------------------
-- The CLabel type
@@ -153,14 +161,14 @@ data CLabel
-- | A label from a .cmm file that is not associated with a .hs level Id.
| CmmLabel
- PackageId -- what package the label belongs to.
+ PackageKey -- what package the label belongs to.
FastString -- identifier giving the prefix of the label
CmmLabelInfo -- encodes the suffix of the label
-- | A label with a baked-in \/ algorithmically generated name that definitely
-- comes from the RTS. The code for it must compile into libHSrts.a \/ libHSrts.so
-- If it doesn't have an algorithmically generated name then use a CmmLabel
- -- instead and give it an appropriate PackageId argument.
+ -- instead and give it an appropriate PackageKey argument.
| RtsLabel
RtsLabelInfo
@@ -185,6 +193,10 @@ data CLabel
| AsmTempLabel
{-# UNPACK #-} !Unique
+ | AsmTempDerivedLabel
+ CLabel
+ FastString -- suffix
+
| StringLitLabel
{-# UNPACK #-} !Unique
@@ -232,7 +244,7 @@ data CLabel
data ForeignLabelSource
-- | Label is in a named package
- = ForeignLabelInPackage PackageId
+ = ForeignLabelInPackage PackageKey
-- | Label is in some external, system package that doesn't also
-- contain compiled Haskell code, and is not associated with any .hi files.
@@ -400,26 +412,33 @@ mkStaticConEntryLabel name c = IdLabel name c StaticConEntry
-- Constructing Cmm Labels
mkDirty_MUT_VAR_Label, mkSplitMarkerLabel, mkUpdInfoLabel,
mkBHUpdInfoLabel, mkIndStaticInfoLabel, mkMainCapabilityLabel,
- mkMAP_FROZEN_infoLabel, mkMAP_DIRTY_infoLabel,
+ mkMAP_FROZEN_infoLabel, mkMAP_FROZEN0_infoLabel, mkMAP_DIRTY_infoLabel,
mkEMPTY_MVAR_infoLabel, mkTopTickyCtrLabel,
- mkCAFBlackHoleInfoTableLabel, mkCAFBlackHoleEntryLabel :: CLabel
+ mkCAFBlackHoleInfoTableLabel, mkCAFBlackHoleEntryLabel,
+ mkArrWords_infoLabel, mkSMAP_FROZEN_infoLabel, mkSMAP_FROZEN0_infoLabel,
+ mkSMAP_DIRTY_infoLabel :: CLabel
mkDirty_MUT_VAR_Label = mkForeignLabel (fsLit "dirty_MUT_VAR") Nothing ForeignLabelInExternalPackage IsFunction
-mkSplitMarkerLabel = CmmLabel rtsPackageId (fsLit "__stg_split_marker") CmmCode
-mkUpdInfoLabel = CmmLabel rtsPackageId (fsLit "stg_upd_frame") CmmInfo
-mkBHUpdInfoLabel = CmmLabel rtsPackageId (fsLit "stg_bh_upd_frame" ) CmmInfo
-mkIndStaticInfoLabel = CmmLabel rtsPackageId (fsLit "stg_IND_STATIC") CmmInfo
-mkMainCapabilityLabel = CmmLabel rtsPackageId (fsLit "MainCapability") CmmData
-mkMAP_FROZEN_infoLabel = CmmLabel rtsPackageId (fsLit "stg_MUT_ARR_PTRS_FROZEN0") CmmInfo
-mkMAP_DIRTY_infoLabel = CmmLabel rtsPackageId (fsLit "stg_MUT_ARR_PTRS_DIRTY") CmmInfo
-mkEMPTY_MVAR_infoLabel = CmmLabel rtsPackageId (fsLit "stg_EMPTY_MVAR") CmmInfo
-mkTopTickyCtrLabel = CmmLabel rtsPackageId (fsLit "top_ct") CmmData
-mkCAFBlackHoleInfoTableLabel = CmmLabel rtsPackageId (fsLit "stg_CAF_BLACKHOLE") CmmInfo
-mkCAFBlackHoleEntryLabel = CmmLabel rtsPackageId (fsLit "stg_CAF_BLACKHOLE") CmmEntry
+mkSplitMarkerLabel = CmmLabel rtsPackageKey (fsLit "__stg_split_marker") CmmCode
+mkUpdInfoLabel = CmmLabel rtsPackageKey (fsLit "stg_upd_frame") CmmInfo
+mkBHUpdInfoLabel = CmmLabel rtsPackageKey (fsLit "stg_bh_upd_frame" ) CmmInfo
+mkIndStaticInfoLabel = CmmLabel rtsPackageKey (fsLit "stg_IND_STATIC") CmmInfo
+mkMainCapabilityLabel = CmmLabel rtsPackageKey (fsLit "MainCapability") CmmData
+mkMAP_FROZEN_infoLabel = CmmLabel rtsPackageKey (fsLit "stg_MUT_ARR_PTRS_FROZEN") CmmInfo
+mkMAP_FROZEN0_infoLabel = CmmLabel rtsPackageKey (fsLit "stg_MUT_ARR_PTRS_FROZEN0") CmmInfo
+mkMAP_DIRTY_infoLabel = CmmLabel rtsPackageKey (fsLit "stg_MUT_ARR_PTRS_DIRTY") CmmInfo
+mkEMPTY_MVAR_infoLabel = CmmLabel rtsPackageKey (fsLit "stg_EMPTY_MVAR") CmmInfo
+mkTopTickyCtrLabel = CmmLabel rtsPackageKey (fsLit "top_ct") CmmData
+mkCAFBlackHoleInfoTableLabel = CmmLabel rtsPackageKey (fsLit "stg_CAF_BLACKHOLE") CmmInfo
+mkCAFBlackHoleEntryLabel = CmmLabel rtsPackageKey (fsLit "stg_CAF_BLACKHOLE") CmmEntry
+mkArrWords_infoLabel = CmmLabel rtsPackageKey (fsLit "stg_ARR_WORDS") CmmInfo
+mkSMAP_FROZEN_infoLabel = CmmLabel rtsPackageKey (fsLit "stg_SMALL_MUT_ARR_PTRS_FROZEN") CmmInfo
+mkSMAP_FROZEN0_infoLabel = CmmLabel rtsPackageKey (fsLit "stg_SMALL_MUT_ARR_PTRS_FROZEN0") CmmInfo
+mkSMAP_DIRTY_infoLabel = CmmLabel rtsPackageKey (fsLit "stg_SMALL_MUT_ARR_PTRS_DIRTY") CmmInfo
-----
mkCmmInfoLabel, mkCmmEntryLabel, mkCmmRetInfoLabel, mkCmmRetLabel,
mkCmmCodeLabel, mkCmmDataLabel, mkCmmClosureLabel
- :: PackageId -> FastString -> CLabel
+ :: PackageKey -> FastString -> CLabel
mkCmmInfoLabel pkg str = CmmLabel pkg str CmmInfo
mkCmmEntryLabel pkg str = CmmLabel pkg str CmmEntry
@@ -535,6 +554,11 @@ mkStringLitLabel = StringLitLabel
mkAsmTempLabel :: Uniquable a => a -> CLabel
mkAsmTempLabel a = AsmTempLabel (getUnique a)
+mkAsmTempDerivedLabel :: CLabel -> FastString -> CLabel
+mkAsmTempDerivedLabel = AsmTempDerivedLabel
+
+mkAsmTempEndLabel :: CLabel -> CLabel
+mkAsmTempEndLabel l = mkAsmTempDerivedLabel l (fsLit "_end")
mkPlainModuleInitLabel :: Module -> CLabel
mkPlainModuleInitLabel mod = PlainModuleInitLabel mod
@@ -622,12 +646,13 @@ needsCDecl (PlainModuleInitLabel _) = True
needsCDecl (StringLitLabel _) = False
needsCDecl (AsmTempLabel _) = False
+needsCDecl (AsmTempDerivedLabel _ _) = False
needsCDecl (RtsLabel _) = False
needsCDecl (CmmLabel pkgId _ _)
-- Prototypes for labels defined in the runtime system are imported
-- into HC files via includes/Stg.h.
- | pkgId == rtsPackageId = False
+ | pkgId == rtsPackageKey = False
-- For other labels we inline one into the HC file directly.
| otherwise = True
@@ -640,12 +665,6 @@ needsCDecl (DynamicLinkerLabel {}) = panic "needsCDecl DynamicLinkerLabel"
needsCDecl PicBaseLabel = panic "needsCDecl PicBaseLabel"
needsCDecl (DeadStripPreventer {}) = panic "needsCDecl DeadStripPreventer"
--- | Check whether a label is a local temporary for native code generation
-isAsmTemp :: CLabel -> Bool
-isAsmTemp (AsmTempLabel _) = True
-isAsmTemp _ = False
-
-
-- | If a label is a local temporary used for native code generation
-- then return just its unique, otherwise nothing.
maybeAsmTemp :: CLabel -> Maybe Unique
@@ -751,6 +770,7 @@ externallyVisibleCLabel :: CLabel -> Bool -- not C "static"
externallyVisibleCLabel (CaseLabel _ _) = False
externallyVisibleCLabel (StringLitLabel _) = False
externallyVisibleCLabel (AsmTempLabel _) = False
+externallyVisibleCLabel (AsmTempDerivedLabel _ _)= False
externallyVisibleCLabel (PlainModuleInitLabel _)= True
externallyVisibleCLabel (RtsLabel _) = True
externallyVisibleCLabel (CmmLabel _ _ _) = True
@@ -837,11 +857,11 @@ idInfoLabelType info =
-- @labelDynamic@ returns @True@ if the label is located
-- in a DLL, be it a data reference or not.
-labelDynamic :: DynFlags -> PackageId -> Module -> CLabel -> Bool
+labelDynamic :: DynFlags -> PackageKey -> Module -> CLabel -> Bool
labelDynamic dflags this_pkg this_mod lbl =
case lbl of
-- is the RTS in a DLL or not?
- RtsLabel _ -> not (gopt Opt_Static dflags) && (this_pkg /= rtsPackageId)
+ RtsLabel _ -> not (gopt Opt_Static dflags) && (this_pkg /= rtsPackageKey)
IdLabel n _ _ -> isDllName dflags this_pkg this_mod n
@@ -874,7 +894,9 @@ labelDynamic dflags this_pkg this_mod lbl =
-- libraries
True
- PlainModuleInitLabel m -> not (gopt Opt_Static dflags) && this_pkg /= (modulePackageId m)
+ PlainModuleInitLabel m -> not (gopt Opt_Static dflags) && this_pkg /= (modulePackageKey m)
+
+ HpcTicksLabel m -> not (gopt Opt_Static dflags) && this_pkg /= (modulePackageKey m)
-- Note that DynamicLinkerLabels do NOT require dynamic linking themselves.
_ -> False
@@ -967,6 +989,13 @@ pprCLabel platform (AsmTempLabel u)
else
char '_' <> pprUnique u
+pprCLabel platform (AsmTempDerivedLabel l suf)
+ | cGhcWithNativeCodeGen == "YES"
+ = ptext (asmTempLabelPrefix platform)
+ <> case l of AsmTempLabel u -> pprUnique u
+ _other -> pprCLabel platform l
+ <> ftext suf
+
pprCLabel platform (DynamicLinkerLabel info lbl)
| cGhcWithNativeCodeGen == "YES"
= pprDynamicLinkerAsmLabel platform info lbl
@@ -1092,6 +1121,7 @@ pprCLbl (HpcTicksLabel mod)
= ptext (sLit "_hpc_tickboxes_") <> ppr mod <> ptext (sLit "_hpc")
pprCLbl (AsmTempLabel {}) = panic "pprCLbl AsmTempLabel"
+pprCLbl (AsmTempDerivedLabel {})= panic "pprCLbl AsmTempDerivedLabel"
pprCLbl (DynamicLinkerLabel {}) = panic "pprCLbl DynamicLinkerLabel"
pprCLbl (PicBaseLabel {}) = panic "pprCLbl PicBaseLabel"
pprCLbl (DeadStripPreventer {}) = panic "pprCLbl DeadStripPreventer"
diff --git a/compiler/cmm/Cmm.hs b/compiler/cmm/Cmm.hs
index fadce0b5eb2b..9e9bae93c6aa 100644
--- a/compiler/cmm/Cmm.hs
+++ b/compiler/cmm/Cmm.hs
@@ -1,5 +1,5 @@
-- Cmm representations using Hoopl's Graph CmmNode e x.
-{-# LANGUAGE GADTs #-}
+{-# LANGUAGE CPP, GADTs #-}
module Cmm (
-- * Cmm top-level datatypes
@@ -80,10 +80,7 @@ data GenCmmDecl d h g
-- registers will be correct in generated C-- code, but
-- not in hand-written C-- code. However,
-- splitAtProcPoints calculates correct liveness
- -- information for CmmProc's. Right now only the LLVM
- -- back-end relies on correct liveness information and
- -- for that back-end we always call splitAtProcPoints, so
- -- all is good.
+ -- information for CmmProcs.
g -- Control-flow graph for the procedure's code
| CmmData -- Static data
diff --git a/compiler/cmm/CmmBuildInfoTables.hs b/compiler/cmm/CmmBuildInfoTables.hs
index 04c3b71494d7..6521a84006f0 100644
--- a/compiler/cmm/CmmBuildInfoTables.hs
+++ b/compiler/cmm/CmmBuildInfoTables.hs
@@ -1,10 +1,6 @@
-{-# LANGUAGE GADTs, NoMonoLocalBinds #-}
+{-# LANGUAGE CPP, GADTs #-}
--- Norman likes local bindings
--- If this module lives on I'd like to get rid of the NoMonoLocalBinds
--- extension in due course
-
--- Todo: remove -fno-warn-warnings-deprecations
+-- See Note [Deprecations in Hoopl] in Hoopl module
{-# OPTIONS_GHC -fno-warn-warnings-deprecations #-}
module CmmBuildInfoTables
( CAFSet, CAFEnv, cafAnal
@@ -290,7 +286,7 @@ bundle :: Map CLabel CAFSet
-> (CAFEnv, CmmDecl)
-> (CAFSet, Maybe CLabel)
-> (BlockEnv CAFSet, CmmDecl)
-bundle flatmap (env, decl@(CmmProc infos lbl _ g)) (closure_cafs, mb_lbl)
+bundle flatmap (env, decl@(CmmProc infos _lbl _ g)) (closure_cafs, mb_lbl)
= ( mapMapWithKey get_cafs (info_tbls infos), decl )
where
entry = g_entry g
@@ -301,9 +297,13 @@ bundle flatmap (env, decl@(CmmProc infos lbl _ g)) (closure_cafs, mb_lbl)
get_cafs l _
| l == entry = entry_cafs
- | otherwise = if not (mapMember l env)
- then pprPanic "bundle" (ppr l <+> ppr lbl <+> ppr (info_tbls infos) $$ ppr env $$ ppr decl)
- else flatten flatmap $ expectJust "bundle" $ mapLookup l env
+ | Just info <- mapLookup l env = flatten flatmap info
+ | otherwise = Set.empty
+ -- the label might not be in the env if the code corresponding to
+ -- this info table was optimised away (perhaps because it was
+ -- unreachable). In this case it doesn't matter what SRT we
+ -- infer, since the info table will not appear in the generated
+ -- code. See #9329.
bundle _flatmap (_, decl) _
= ( mapEmpty, decl )
diff --git a/compiler/cmm/CmmCallConv.hs b/compiler/cmm/CmmCallConv.hs
index 60e2c8c8f73f..f36fc0bae5df 100644
--- a/compiler/cmm/CmmCallConv.hs
+++ b/compiler/cmm/CmmCallConv.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module CmmCallConv (
ParamLocation(..),
diff --git a/compiler/cmm/CmmCommonBlockElim.hs b/compiler/cmm/CmmCommonBlockElim.hs
index 34e22cecfb8d..b3fbe785558e 100644
--- a/compiler/cmm/CmmCommonBlockElim.hs
+++ b/compiler/cmm/CmmCommonBlockElim.hs
@@ -13,8 +13,10 @@ import Prelude hiding (iterate, succ, unzip, zip)
import Hoopl hiding (ChangeFlag)
import Data.Bits
+import Data.Maybe (fromMaybe,mapMaybe)
import qualified Data.List as List
import Data.Word
+import qualified Data.Map as M
import Outputable
import UniqFM
@@ -37,7 +39,7 @@ my_trace = if False then pprTrace else \_ _ a -> a
-- TODO: Use optimization fuel
elimCommonBlocks :: CmmGraph -> CmmGraph
-elimCommonBlocks g = replaceLabels env g
+elimCommonBlocks g = replaceLabels env $ copyTicks env g
where
env = iterate hashed_blocks mapEmpty
hashed_blocks = map (\b -> (hash_block b, b)) $ postorderDfs g
@@ -59,17 +61,13 @@ type HashCode = Int
common_block :: State -> (HashCode, CmmBlock) -> State
common_block (old_change, bmap, subst) (hash, b) =
case lookupUFM bmap hash of
- Just bs -> case (List.find (eqBlockBodyWith (eqBid subst) b) bs,
- mapLookup bid subst) of
- (Just b', Nothing) -> addSubst b'
- (Just b', Just b'') | entryLabel b' /= b'' -> addSubst b'
- | otherwise -> (old_change, bmap, subst)
- _ -> (old_change, addToUFM bmap hash (b : bs), subst)
- Nothing -> (old_change, addToUFM bmap hash [b], subst)
+ Just bs | Just b' <- List.find (eqBlockBodyWith (eqBid subst) b) bs
+ -> if mapLookup bid subst == Just (entryLabel b')
+ then (old_change, bmap, subst)
+ else my_trace "found new common block" (ppr bid <> char '=' <> ppr (entryLabel b')) $
+ (True, bmap, mapInsert bid (entryLabel b') subst)
+ m_bs -> (old_change, addToUFM bmap hash (b : fromMaybe [] m_bs), subst)
where bid = entryLabel b
- addSubst b' = my_trace "found new common block" (ppr bid <> char '=' <> ppr (entryLabel b')) $
- (True, bmap, mapInsert bid (entryLabel b') subst)
-
-- -----------------------------------------------------------------------------
-- Hashing and equality on blocks
@@ -89,7 +87,8 @@ hash_block block =
hash_lst m h = hash_node m + h `shiftL` 1
hash_node :: CmmNode O x -> Word32
- hash_node (CmmComment _) = 0 -- don't care
+ hash_node n | dont_care n = 0 -- don't care
+ hash_node (CmmUnwind _ e) = hash_e e
hash_node (CmmAssign r e) = hash_reg r + hash_e e
hash_node (CmmStore e e') = hash_e e + hash_e e'
hash_node (CmmUnsafeForeignCall t _ as) = hash_tgt t + hash_list hash_e as
@@ -98,6 +97,7 @@ hash_block block =
hash_node (CmmCall e _ _ _ _ _) = hash_e e
hash_node (CmmForeignCall t _ _ _ _ _ _) = hash_tgt t
hash_node (CmmSwitch e _) = hash_e e
+ hash_node _ = error "hash_node: unknown Cmm node!"
hash_reg :: CmmReg -> Word32
hash_reg (CmmLocal _) = 117
@@ -127,6 +127,13 @@ hash_block block =
hash_list f = foldl (\z x -> f x + z) (0::Word32)
cvt = fromInteger . toInteger
+
+-- | Ignore these node types for equality
+dont_care :: CmmNode O x -> Bool
+dont_care CmmComment {} = True
+dont_care CmmTick {} = True
+dont_care _other = False
+
-- Utilities: equality and substitution on the graph.
-- Given a map ``subst'' from BlockID -> BlockID, we define equality.
@@ -143,7 +150,6 @@ lookupBid subst bid = case mapLookup bid subst of
--
eqMiddleWith :: (BlockId -> BlockId -> Bool)
-> CmmNode O O -> CmmNode O O -> Bool
-eqMiddleWith _ (CmmComment _) (CmmComment _) = True
eqMiddleWith eqBid (CmmAssign r1 e1) (CmmAssign r2 e2)
= r1 == r2 && eqExprWith eqBid e1 e2
eqMiddleWith eqBid (CmmStore l1 r1) (CmmStore l2 r2)
@@ -178,10 +184,12 @@ eqExprWith eqBid = eq
-- IDs to block IDs.
eqBlockBodyWith :: (BlockId -> BlockId -> Bool) -> CmmBlock -> CmmBlock -> Bool
eqBlockBodyWith eqBid block block'
- = and (zipWith (eqMiddleWith eqBid) (blockToList m) (blockToList m')) &&
+ = and (zipWith (eqMiddleWith eqBid) nodes nodes') &&
eqLastWith eqBid l l'
where (_,m,l) = blockSplit block
+ nodes = filter (not . dont_care) (blockToList m)
(_,m',l') = blockSplit block'
+ nodes' = filter (not . dont_care) (blockToList m')
@@ -202,3 +210,20 @@ eqMaybeWith :: (a -> b -> Bool) -> Maybe a -> Maybe b -> Bool
eqMaybeWith eltEq (Just e) (Just e') = eltEq e e'
eqMaybeWith _ Nothing Nothing = True
eqMaybeWith _ _ _ = False
+
+copyTicks :: BlockEnv BlockId -> CmmGraph -> CmmGraph
+copyTicks env g = ofBlockMap (g_entry g) $ mapMap copyTo blockMap
+ where -- Reverse block merge map
+ blockMap = toBlockMap g
+ revEnv = mapFoldWithKey insertRev M.empty env
+ insertRev k x = M.insertWith (const (k:)) x [k]
+ -- Copy ticks and scopes into the given block
+ copyTo block = case M.lookup (entryLabel block) revEnv of
+ Nothing -> block
+ Just ls -> foldr copy block $ mapMaybe (flip mapLookup blockMap) ls
+ copy from to =
+ let ticks = blockTicks from
+ CmmEntry _ scp0 = firstNode from
+ (CmmEntry lbl scp1, code) = blockSplitHead to
+ in CmmEntry lbl (combineTickScopes scp0 scp1) `blockJoinHead`
+ foldr blockCons code (map CmmTick ticks)
diff --git a/compiler/cmm/CmmContFlowOpt.hs b/compiler/cmm/CmmContFlowOpt.hs
index 52b95a93ccfb..bcb4cf97b336 100644
--- a/compiler/cmm/CmmContFlowOpt.hs
+++ b/compiler/cmm/CmmContFlowOpt.hs
@@ -46,25 +46,20 @@ import Prelude hiding (succ, unzip, zip)
-- Note [Control-flow optimisations]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
--- This optimisation does four things:
+-- This optimisation does three things:
--
-- - If a block finishes in an unconditonal branch to another block
-- and that is the only jump to that block we concatenate the
-- destination block at the end of the current one.
--
--- - If a block finishes in an unconditional branch, we may be able
--- to shortcut the destination block.
---
-- - If a block finishes in a call whose continuation block is a
-- goto, then we can shortcut the destination, making the
-- continuation block the destination of the goto - but see Note
-- [Shortcut call returns].
--
--- - For block finishing in conditional branch we try to invert the
--- condition and shortcut destination of alternatives.
---
-- - For any block that is not a call we try to shortcut the
--- destination(s).
+-- destination(s). Additionally, if a block ends with a
+-- conditional branch we try to invert the condition.
--
-- Blocks are processed using postorder DFS traversal. A side effect
-- of determining traversal order with a graph search is elimination
@@ -204,11 +199,8 @@ blockConcat splitting_procs g@CmmGraph { g_entry = entry_id }
-- (2) remove b' from the map of blocks
-- (3) remove information about b' from predecessors map
--
- -- This guard must be first so that we always eliminate blocks that have
- -- only one predecessor. If we had a target block that is both
- -- shorcutable and has only one predecessor and attempted to shortcut it
- -- first we would make that block unreachable but would not remove it
- -- from the graph.
+ -- Since we know that the block has only one predecessor we call
+ -- mapDelete directly instead of calling decPreds.
--
-- Note that we always maintain an up-to-date list of predecessors, so
-- we can ignore the contents of shortcut_map
@@ -220,20 +212,6 @@ blockConcat splitting_procs g@CmmGraph { g_entry = entry_id }
, shortcut_map
, mapDelete b' backEdges )
- -- If:
- -- (1) current block ends with unconditional branch to b' and
- -- (2) we can shortcut block b'
- -- Then:
- -- (1) concatenate b' at the end of current block, effectively
- -- changing target of uncondtional jump from b' to dest
- -- (2) increase number of predecessors of dest by 1
- -- (3) decrease number of predecessors of b' by 1
- | CmmBranch b' <- last
- , Just blk' <- mapLookup b' blocks
- , Just dest <- canShortcut blk'
- = ( mapInsert bid (splice head blk') blocks, shortcut_map,
- decPreds b' $ incPreds dest backEdges )
-
-- If:
-- (1) we are splitting proc points (see Note
-- [Shortcut call returns and proc-points]) and
@@ -263,7 +241,10 @@ blockConcat splitting_procs g@CmmGraph { g_entry = entry_id }
-- conditional
-- (2) attempt to shortcut all destination blocks
-- (3) if new successors of a block are different from the old ones
- -- we update the of predecessors accordingly
+ -- update the list of predecessors accordingly
+ --
+ -- A special case of this is a situation when a block ends with an
+ -- unconditional jump to a block that can be shortcut.
| Nothing <- callContinuation_maybe last
= let oldSuccs = successors last
newSuccs = successors swapcond_last
@@ -336,18 +317,22 @@ decPreds bid edges = case mapLookup bid edges of
canShortcut :: CmmBlock -> Maybe BlockId
canShortcut block
| (_, middle, CmmBranch dest) <- blockSplit block
- , isEmptyBlock middle
+ , all dont_care $ blockToList middle
= Just dest
| otherwise
= Nothing
-
+ where dont_care CmmComment{} = True
+ dont_care CmmTick{} = True
+ dont_care _other = False
-- Concatenates two blocks. First one is assumed to be open on exit, the second
-- is assumed to be closed on entry (i.e. it has a label attached to it, which
-- the splice function removes by calling snd on result of blockSplitHead).
splice :: Block CmmNode C O -> CmmBlock -> CmmBlock
-splice head rest = head `blockAppend` snd (blockSplitHead rest)
-
+splice head rest = entry `blockJoinHead` code0 `blockAppend` code1
+ where (CmmEntry lbl sc0, code0) = blockSplitHead head
+ (CmmEntry _ sc1, code1) = blockSplitHead rest
+ entry = CmmEntry lbl (combineTickScopes sc0 sc1)
-- If node is a call with continuation call return Just label of that
-- continuation. Otherwise return Nothing.
@@ -394,14 +379,14 @@ predMap blocks = foldr add_preds mapEmpty blocks
-- Removing unreachable blocks
removeUnreachableBlocksProc :: CmmDecl -> CmmDecl
removeUnreachableBlocksProc proc@(CmmProc info lbl live g)
- | length used_blocks < mapSize (toBlockMap g)
- = CmmProc info' lbl live g'
+ | length used_blocks < mapSize (toBlockMap g)
+ = CmmProc info' lbl live g'
| otherwise
= proc
where
g' = ofBlockList (g_entry g) used_blocks
info' = info { info_tbls = keep_used (info_tbls info) }
- -- Remove any info_tbls for unreachable
+ -- Remove any info_tbls for unreachable
keep_used :: BlockEnv CmmInfoTable -> BlockEnv CmmInfoTable
keep_used bs = mapFoldWithKey keep emptyBlockMap bs
diff --git a/compiler/cmm/CmmExpr.hs b/compiler/cmm/CmmExpr.hs
index 0c0c9714ecea..1d6c97f41eea 100644
--- a/compiler/cmm/CmmExpr.hs
+++ b/compiler/cmm/CmmExpr.hs
@@ -1,4 +1,7 @@
+{-# LANGUAGE CPP #-}
{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE UndecidableInstances #-}
module CmmExpr
diff --git a/compiler/cmm/CmmInfo.hs b/compiler/cmm/CmmInfo.hs
index 42c9e6ba53f4..3bfc728ac07f 100644
--- a/compiler/cmm/CmmInfo.hs
+++ b/compiler/cmm/CmmInfo.hs
@@ -1,10 +1,4 @@
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
+{-# LANGUAGE CPP #-}
module CmmInfo (
mkEmptyContInfoTable,
cmmToRawCmm,
@@ -61,7 +55,7 @@ import Data.Word
-- When we split at proc points, we need an empty info table.
mkEmptyContInfoTable :: CLabel -> CmmInfoTable
-mkEmptyContInfoTable info_lbl
+mkEmptyContInfoTable info_lbl
= CmmInfoTable { cit_lbl = info_lbl
, cit_rep = mkStackRep []
, cit_prof = NoProfilingInfo
@@ -83,31 +77,31 @@ cmmToRawCmm dflags cmms
-- represented by a label+offset expression).
--
-- With tablesNextToCode, the layout is
---
---
---
+--
+--
+--
--
-- Without tablesNextToCode, the layout of an info table is
---
---
---
+--
+--
+--
--
--- See includes/rts/storage/InfoTables.h
+-- See includes/rts/storage/InfoTables.h
--
-- For return-points these are as follows
--
-- Tables next to code:
--
---
---
--- ret-addr -->
+--
+--
+-- ret-addr -->
--
-- Not tables-next-to-code:
--
--- ret-addr -->
---
---
+-- ret-addr -->
+--
+--
--
-- * The SRT slot is only there if there is SRT info to record
@@ -167,21 +161,21 @@ mkInfoTable dflags proc@(CmmProc infos entry_lbl live blocks)
reverse rel_extra_bits ++ rel_std_info))
-----------------------------------------------------
-type InfoTableContents = ( [CmmLit] -- The standard part
- , [CmmLit] ) -- The "extra bits"
+type InfoTableContents = ( [CmmLit] -- The standard part
+ , [CmmLit] ) -- The "extra bits"
-- These Lits have *not* had mkRelativeTo applied to them
mkInfoTableContents :: DynFlags
-> CmmInfoTable
-> Maybe Int -- Override default RTS type tag?
-> UniqSM ([RawCmmDecl], -- Auxiliary top decls
- InfoTableContents) -- Info tbl + extra bits
+ InfoTableContents) -- Info tbl + extra bits
mkInfoTableContents dflags
info@(CmmInfoTable { cit_lbl = info_lbl
, cit_rep = smrep
, cit_prof = prof
- , cit_srt = srt })
+ , cit_srt = srt })
mb_rts_tag
| RTSRep rts_tag rep <- smrep
= mkInfoTableContents dflags info{cit_rep = rep} (Just rts_tag)
@@ -215,9 +209,9 @@ mkInfoTableContents dflags
where
mk_pieces :: ClosureTypeInfo -> [CmmLit]
-> UniqSM ( Maybe StgHalfWord -- Override the SRT field with this
- , Maybe CmmLit -- Override the layout field with this
- , [CmmLit] -- "Extra bits" for info table
- , [RawCmmDecl]) -- Auxiliary data decls
+ , Maybe CmmLit -- Override the layout field with this
+ , [CmmLit] -- "Extra bits" for info table
+ , [RawCmmDecl]) -- Auxiliary data decls
mk_pieces (Constr con_tag con_descr) _no_srt -- A data constructor
= do { (descr_lit, decl) <- newStringLit con_descr
; return ( Just (toStgHalfWord dflags (fromIntegral con_tag))
@@ -230,7 +224,7 @@ mkInfoTableContents dflags
= return (Just (toStgHalfWord dflags 0), Just (mkWordCLit dflags (fromIntegral offset)), [], [])
-- Layout known (one free var); we use the layout field for offset
- mk_pieces (Fun arity (ArgSpec fun_type)) srt_label
+ mk_pieces (Fun arity (ArgSpec fun_type)) srt_label
= do { let extra_bits = packIntsCLit dflags fun_type arity : srt_label
; return (Nothing, Nothing, extra_bits, []) }
@@ -280,7 +274,7 @@ mkSRTLit dflags (C_SRT lbl off bitmap) = ([cmmLabelOffW dflags lbl off], bitmap)
-------------------------------------------------------------------------
--
--- Position independent code
+-- Position independent code
--
-------------------------------------------------------------------------
-- In order to support position independent code, we mustn't put absolute
@@ -292,7 +286,7 @@ mkSRTLit dflags (C_SRT lbl off bitmap) = ([cmmLabelOffW dflags lbl off], bitmap)
-- as we want to keep binary compatibility between PIC and non-PIC.
makeRelativeRefTo :: DynFlags -> CLabel -> CmmLit -> CmmLit
-
+
makeRelativeRefTo dflags info_lbl (CmmLabel lbl)
| tablesNextToCode dflags
= CmmLabelDiffOff lbl info_lbl 0
@@ -304,16 +298,16 @@ makeRelativeRefTo _ _ lit = lit
-------------------------------------------------------------------------
--
--- Build a liveness mask for the stack layout
+-- Build a liveness mask for the stack layout
--
-------------------------------------------------------------------------
-- There are four kinds of things on the stack:
--
--- - pointer variables (bound in the environment)
--- - non-pointer variables (bound in the environment)
--- - free slots (recorded in the stack free list)
--- - non-pointer data slots (recorded in the stack free list)
+-- - pointer variables (bound in the environment)
+-- - non-pointer variables (bound in the environment)
+-- - free slots (recorded in the stack free list)
+-- - non-pointer data slots (recorded in the stack free list)
--
-- The first two are represented with a 'Just' of a 'LocalReg'.
-- The last two with one or more 'Nothing' constructors.
@@ -331,7 +325,7 @@ mkLivenessBits dflags liveness
| n_bits > mAX_SMALL_BITMAP_SIZE dflags -- does not fit in one word
= do { uniq <- getUniqueUs
; let bitmap_lbl = mkBitmapLabel uniq
- ; return (CmmLabel bitmap_lbl,
+ ; return (CmmLabel bitmap_lbl,
[mkRODataLits bitmap_lbl lits]) }
| otherwise -- Fits in one word
@@ -342,10 +336,10 @@ mkLivenessBits dflags liveness
bitmap :: Bitmap
bitmap = mkBitmap dflags liveness
- small_bitmap = case bitmap of
+ small_bitmap = case bitmap of
[] -> toStgWord dflags 0
[b] -> b
- _ -> panic "mkLiveness"
+ _ -> panic "mkLiveness"
bitmap_word = toStgWord dflags (fromIntegral n_bits)
.|. (small_bitmap `shiftL` bITMAP_BITS_SHIFT dflags)
@@ -356,7 +350,7 @@ mkLivenessBits dflags liveness
-------------------------------------------------------------------------
--
--- Generating a standard info table
+-- Generating a standard info table
--
-------------------------------------------------------------------------
@@ -369,23 +363,23 @@ mkLivenessBits dflags liveness
mkStdInfoTable
:: DynFlags
- -> (CmmLit,CmmLit) -- Closure type descr and closure descr (profiling)
+ -> (CmmLit,CmmLit) -- Closure type descr and closure descr (profiling)
-> Int -- Closure RTS tag
-> StgHalfWord -- SRT length
- -> CmmLit -- layout field
+ -> CmmLit -- layout field
-> [CmmLit]
mkStdInfoTable dflags (type_descr, closure_descr) cl_type srt_len layout_lit
- = -- Parallel revertible-black hole field
+ = -- Parallel revertible-black hole field
prof_info
- -- Ticky info (none at present)
- -- Debug info (none at present)
+ -- Ticky info (none at present)
+ -- Debug info (none at present)
++ [layout_lit, type_lit]
- where
- prof_info
- | gopt Opt_SccProfilingOn dflags = [type_descr, closure_descr]
- | otherwise = []
+ where
+ prof_info
+ | gopt Opt_SccProfilingOn dflags = [type_descr, closure_descr]
+ | otherwise = []
type_lit = packHalfWordsCLit dflags (toStgHalfWord dflags (fromIntegral cl_type)) srt_len
@@ -416,7 +410,7 @@ srtEscape dflags = toStgHalfWord dflags (-1)
-------------------------------------------------------------------------
--
--- Accessing fields of an info table
+-- Accessing fields of an info table
--
-------------------------------------------------------------------------
@@ -491,7 +485,7 @@ funInfoTable dflags info_ptr
= cmmOffsetB dflags info_ptr (- stdInfoTableSizeB dflags - sIZEOF_StgFunInfoExtraRev dflags)
| otherwise
= cmmOffsetW dflags info_ptr (1 + stdInfoTableSizeW dflags)
- -- Past the entry code pointer
+ -- Past the entry code pointer
-- Takes the info pointer of a function, returns the function's arity
funInfoArity :: DynFlags -> CmmExpr -> CmmExpr
@@ -514,7 +508,7 @@ funInfoArity dflags iptr
-- Info table sizes & offsets
--
-----------------------------------------------------------------------------
-
+
stdInfoTableSizeW :: DynFlags -> WordOff
-- The size of a standard info table varies with profiling/ticky etc,
-- so we can't get it from Constants
@@ -546,15 +540,14 @@ stdInfoTableSizeB :: DynFlags -> ByteOff
stdInfoTableSizeB dflags = stdInfoTableSizeW dflags * wORD_SIZE dflags
stdSrtBitmapOffset :: DynFlags -> ByteOff
--- Byte offset of the SRT bitmap half-word which is
+-- Byte offset of the SRT bitmap half-word which is
-- in the *higher-addressed* part of the type_lit
stdSrtBitmapOffset dflags = stdInfoTableSizeB dflags - hALF_WORD_SIZE dflags
stdClosureTypeOffset :: DynFlags -> ByteOff
--- Byte offset of the closure type half-word
+-- Byte offset of the closure type half-word
stdClosureTypeOffset dflags = stdInfoTableSizeB dflags - wORD_SIZE dflags
stdPtrsOffset, stdNonPtrsOffset :: DynFlags -> ByteOff
stdPtrsOffset dflags = stdInfoTableSizeB dflags - 2 * wORD_SIZE dflags
stdNonPtrsOffset dflags = stdInfoTableSizeB dflags - 2 * wORD_SIZE dflags + hALF_WORD_SIZE dflags
-
diff --git a/compiler/cmm/CmmLayoutStack.hs b/compiler/cmm/CmmLayoutStack.hs
index 95483a2f520b..b8be6afc9684 100644
--- a/compiler/cmm/CmmLayoutStack.hs
+++ b/compiler/cmm/CmmLayoutStack.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE RecordWildCards, GADTs #-}
+{-# LANGUAGE CPP, RecordWildCards, GADTs #-}
module CmmLayoutStack (
cmmLayoutStack, setInfoTableStackMap
) where
@@ -189,16 +189,10 @@ cmmLayoutStack :: DynFlags -> ProcPointSet -> ByteOff -> CmmGraph
cmmLayoutStack dflags procpoints entry_args
graph0@(CmmGraph { g_entry = entry })
= do
- -- pprTrace "cmmLayoutStack" (ppr entry_args) $ return ()
-
- -- We need liveness info. We could do removeDeadAssignments at
- -- the same time, but it buys nothing over doing cmmSink later,
- -- and costs a lot more than just cmmLocalLiveness.
- -- (graph, liveness) <- removeDeadAssignments graph0
+ -- We need liveness info. Dead assignments are removed later
+ -- by the sinking pass.
let (graph, liveness) = (graph0, cmmLocalLiveness dflags graph0)
-
- -- pprTrace "liveness" (ppr liveness) $ return ()
- let blocks = postorderDfs graph
+ blocks = postorderDfs graph
(final_stackmaps, _final_high_sp, new_blocks) <-
mfix $ \ ~(rec_stackmaps, rec_high_sp, _new_blocks) ->
@@ -206,12 +200,9 @@ cmmLayoutStack dflags procpoints entry_args
rec_stackmaps rec_high_sp blocks
new_blocks' <- mapM (lowerSafeForeignCall dflags) new_blocks
-
- -- pprTrace ("Sp HWM") (ppr _final_high_sp) $ return ()
return (ofBlockList entry new_blocks', final_stackmaps)
-
layout :: DynFlags
-> BlockSet -- proc points
-> BlockEnv CmmLocalLive -- liveness
@@ -245,15 +236,13 @@ layout dflags procpoints liveness entry entry_args final_stackmaps final_sp_high
go (b0 : bs) acc_stackmaps acc_hwm acc_blocks
= do
- let (entry0@(CmmEntry entry_lbl), middle0, last0) = blockSplit b0
+ let (entry0@(CmmEntry entry_lbl tscope), middle0, last0) = blockSplit b0
let stack0@StackMap { sm_sp = sp0 }
= mapFindWithDefault
(pprPanic "no stack map for" (ppr entry_lbl))
entry_lbl acc_stackmaps
- -- pprTrace "layout" (ppr entry_lbl <+> ppr stack0) $ return ()
-
-- (a) Update the stack map to include the effects of
-- assignments in this block
let stack1 = foldBlockNodesF (procMiddle acc_stackmaps) middle0 stack0
@@ -271,9 +260,7 @@ layout dflags procpoints liveness entry entry_args final_stackmaps final_sp_high
-- details.
(middle2, sp_off, last1, fixup_blocks, out)
<- handleLastNode dflags procpoints liveness cont_info
- acc_stackmaps stack1 middle0 last0
-
- -- pprTrace "layout(out)" (ppr out) $ return ()
+ acc_stackmaps stack1 tscope middle0 last0
-- (d) Manifest Sp: run over the nodes in the block and replace
-- CmmStackSlot with CmmLoad from Sp with a concrete offset.
@@ -395,7 +382,7 @@ getStackLoc (Young l) n stackmaps =
handleLastNode
:: DynFlags -> ProcPointSet -> BlockEnv CmmLocalLive -> BlockEnv ByteOff
- -> BlockEnv StackMap -> StackMap
+ -> BlockEnv StackMap -> StackMap -> [CmmTickScope]
-> Block CmmNode O O
-> CmmNode O C
-> UniqSM
@@ -407,7 +394,7 @@ handleLastNode
)
handleLastNode dflags procpoints liveness cont_info stackmaps
- stack0@StackMap { sm_sp = sp0 } middle last
+ stack0@StackMap { sm_sp = sp0 } tscope middle last
= case last of
-- At each return / tail call,
-- adjust Sp to point to the last argument pushed, which
@@ -505,7 +492,7 @@ handleLastNode dflags procpoints liveness cont_info stackmaps
| Just stack2 <- mapLookup l stackmaps
= do
let assigs = fixupStack stack0 stack2
- (tmp_lbl, block) <- makeFixupBlock dflags sp0 l stack2 assigs
+ (tmp_lbl, block) <- makeFixupBlock dflags sp0 l stack2 tscope assigs
return (l, tmp_lbl, stack2, block)
-- (b) if the successor is a proc point, save everything
@@ -514,12 +501,9 @@ handleLastNode dflags procpoints liveness cont_info stackmaps
= do
let cont_args = mapFindWithDefault 0 l cont_info
(stack2, assigs) =
- --pprTrace "first visit to proc point"
- -- (ppr l <+> ppr stack1) $
setupStackFrame dflags l liveness (sm_ret_off stack0)
- cont_args stack0
- --
- (tmp_lbl, block) <- makeFixupBlock dflags sp0 l stack2 assigs
+ cont_args stack0
+ (tmp_lbl, block) <- makeFixupBlock dflags sp0 l stack2 tscope assigs
return (l, tmp_lbl, stack2, block)
-- (c) otherwise, the current StackMap is the StackMap for
@@ -533,14 +517,14 @@ handleLastNode dflags procpoints liveness cont_info stackmaps
is_live (r,_) = r `elemRegSet` live
-makeFixupBlock :: DynFlags -> ByteOff -> Label -> StackMap -> [CmmNode O O]
+makeFixupBlock :: DynFlags -> ByteOff -> Label -> StackMap -> [CmmTickScope] -> [CmmNode O O]
-> UniqSM (Label, [CmmBlock])
-makeFixupBlock dflags sp0 l stack assigs
+makeFixupBlock dflags sp0 l stack tscope assigs
| null assigs && sp0 == sm_sp stack = return (l, [])
| otherwise = do
tmp_lbl <- liftM mkBlockId $ getUniqueM
let sp_off = sp0 - sm_sp stack
- block = blockJoin (CmmEntry tmp_lbl)
+ block = blockJoin (CmmEntry tmp_lbl tscope)
(maybeAddSpAdj dflags sp_off (blockFromList assigs))
(CmmBranch l)
return (tmp_lbl, [block])
@@ -682,8 +666,6 @@ allocate :: DynFlags -> ByteOff -> LocalRegSet -> StackMap
allocate dflags ret_off live stackmap@StackMap{ sm_sp = sp0
, sm_regs = regs0 }
=
- -- pprTrace "allocate" (ppr live $$ ppr stackmap) $
-
-- we only have to save regs that are not already in a slot
let to_save = filter (not . (`elemUFM` regs0)) (Set.elems live)
regs1 = filterUFM (\(r,_) -> elemRegSet r live) regs0
@@ -807,8 +789,14 @@ manifestSp dflags stackmaps stack0 sp0 sp_high
adj_pre_sp = mapExpDeep (areaToSp dflags sp0 sp_high area_off)
adj_post_sp = mapExpDeep (areaToSp dflags (sp0 - sp_off) sp_high area_off)
+ add_unwind_info | gopt Opt_Debug dflags
+ = (:) $ CmmUnwind Sp $ CmmRegOff (CmmGlobal Sp) (sp0 - wORD_SIZE dflags)
+ | otherwise
+ = id
+
final_middle = maybeAddSpAdj dflags sp_off $
blockFromList $
+ add_unwind_info $
map adj_pre_sp $
elimStackStores stack0 stackmaps area_off $
middle_pre
@@ -888,7 +876,7 @@ areaToSp _ _ _ _ other = other
-- really the job of the stack layout algorithm, hence we do it now.
optStackCheck :: CmmNode O C -> CmmNode O C
-optStackCheck n = -- Note [null stack check]
+optStackCheck n = -- Note [Always false stack check]
case n of
CmmCondBranch (CmmLit (CmmInt 0 _)) _true false -> CmmBranch false
other -> other
@@ -923,8 +911,7 @@ elimStackStores stackmap stackmaps area_off nodes
CmmStore (CmmStackSlot area m) (CmmReg (CmmLocal r))
| Just (_,off) <- lookupUFM (sm_regs stackmap) r
, area_off area + m == off
- -> -- pprTrace "eliminated a node!" (ppr r) $
- go stackmap ns
+ -> go stackmap ns
_otherwise
-> n : go (procMiddle stackmaps n stackmap) ns
@@ -1000,7 +987,7 @@ that safe foreign call is replace by an unsafe one in the Cmm graph.
lowerSafeForeignCall :: DynFlags -> CmmBlock -> UniqSM CmmBlock
lowerSafeForeignCall dflags block
- | (entry, middle, CmmForeignCall { .. }) <- blockSplit block
+ | (entry@(CmmEntry _ tscope), middle, CmmForeignCall { .. }) <- blockSplit block
= do
-- Both 'id' and 'new_base' are KindNonPtr because they're
-- RTS-only objects and are not subject to garbage collection
@@ -1038,11 +1025,11 @@ lowerSafeForeignCall dflags block
, cml_ret_args = ret_args
, cml_ret_off = ret_off }
- graph' <- lgraphOfAGraph $ suspend <*>
+ graph' <- lgraphOfAGraph ( suspend <*>
midCall <*>
resume <*>
copyout <*>
- mkLast jump
+ mkLast jump, tscope)
case toBlockList graph' of
[one] -> let (_, middle', last) = blockSplit one
diff --git a/compiler/cmm/CmmLex.x b/compiler/cmm/CmmLex.x
index bb5b4e3ae5af..d5a8067486b6 100644
--- a/compiler/cmm/CmmLex.x
+++ b/compiler/cmm/CmmLex.x
@@ -44,7 +44,7 @@ $white_no_nl = $whitechar # \n
$ascdigit = 0-9
$unidigit = \x01 -- Trick Alex into handling Unicode. See alexGetChar.
$digit = [$ascdigit $unidigit]
-$octit = 0-7
+$octit = 0-7
$hexit = [$digit A-F a-f]
$unilarge = \x03 -- Trick Alex into handling Unicode. See alexGetChar.
@@ -70,56 +70,56 @@ $namechar = [$namebegin $digit]
cmm :-
-$white_no_nl+ ;
+$white_no_nl+ ;
^\# pragma .* \n ; -- Apple GCC 3.3 CPP generates pragmas in its output
-^\# (line)? { begin line_prag }
+^\# (line)? { begin line_prag }
-- single-line line pragmas, of the form
-- # "" \n
- $digit+ { setLine line_prag1 }
- \" [^\"]* \" { setFile line_prag2 }
- .* { pop }
+ $digit+ { setLine line_prag1 }
+ \" [^\"]* \" { setFile line_prag2 }
+ .* { pop }
<0> {
- \n ;
-
- [\:\;\{\}\[\]\(\)\=\`\~\/\*\%\-\+\&\^\|\>\<\,\!] { special_char }
-
- ".." { kw CmmT_DotDot }
- "::" { kw CmmT_DoubleColon }
- ">>" { kw CmmT_Shr }
- "<<" { kw CmmT_Shl }
- ">=" { kw CmmT_Ge }
- "<=" { kw CmmT_Le }
- "==" { kw CmmT_Eq }
- "!=" { kw CmmT_Ne }
- "&&" { kw CmmT_BoolAnd }
- "||" { kw CmmT_BoolOr }
-
- P@decimal { global_regN (\n -> VanillaReg n VGcPtr) }
- R@decimal { global_regN (\n -> VanillaReg n VNonGcPtr) }
- F@decimal { global_regN FloatReg }
- D@decimal { global_regN DoubleReg }
- L@decimal { global_regN LongReg }
- Sp { global_reg Sp }
- SpLim { global_reg SpLim }
- Hp { global_reg Hp }
- HpLim { global_reg HpLim }
+ \n ;
+
+ [\:\;\{\}\[\]\(\)\=\`\~\/\*\%\-\+\&\^\|\>\<\,\!] { special_char }
+
+ ".." { kw CmmT_DotDot }
+ "::" { kw CmmT_DoubleColon }
+ ">>" { kw CmmT_Shr }
+ "<<" { kw CmmT_Shl }
+ ">=" { kw CmmT_Ge }
+ "<=" { kw CmmT_Le }
+ "==" { kw CmmT_Eq }
+ "!=" { kw CmmT_Ne }
+ "&&" { kw CmmT_BoolAnd }
+ "||" { kw CmmT_BoolOr }
+
+ P@decimal { global_regN (\n -> VanillaReg n VGcPtr) }
+ R@decimal { global_regN (\n -> VanillaReg n VNonGcPtr) }
+ F@decimal { global_regN FloatReg }
+ D@decimal { global_regN DoubleReg }
+ L@decimal { global_regN LongReg }
+ Sp { global_reg Sp }
+ SpLim { global_reg SpLim }
+ Hp { global_reg Hp }
+ HpLim { global_reg HpLim }
CCCS { global_reg CCCS }
CurrentTSO { global_reg CurrentTSO }
CurrentNursery { global_reg CurrentNursery }
- HpAlloc { global_reg HpAlloc }
- BaseReg { global_reg BaseReg }
-
- $namebegin $namechar* { name }
-
- 0 @octal { tok_octal }
- @decimal { tok_decimal }
- 0[xX] @hexadecimal { tok_hexadecimal }
- @floating_point { strtoken tok_float }
-
- \" @strchar* \" { strtoken tok_string }
+ HpAlloc { global_reg HpAlloc }
+ BaseReg { global_reg BaseReg }
+
+ $namebegin $namechar* { name }
+
+ 0 @octal { tok_octal }
+ @decimal { tok_decimal }
+ 0[xX] @hexadecimal { tok_hexadecimal }
+ @floating_point { strtoken tok_float }
+
+ \" @strchar* \" { strtoken tok_string }
}
{
@@ -160,6 +160,7 @@ data CmmToken
| CmmT_case
| CmmT_default
| CmmT_push
+ | CmmT_unwind
| CmmT_bits8
| CmmT_bits16
| CmmT_bits32
@@ -171,9 +172,9 @@ data CmmToken
| CmmT_float64
| CmmT_gcptr
| CmmT_GlobalReg GlobalReg
- | CmmT_Name FastString
- | CmmT_String String
- | CmmT_Int Integer
+ | CmmT_Name FastString
+ | CmmT_String String
+ | CmmT_Int Integer
| CmmT_Float Rational
| CmmT_EOF
deriving (Show)
@@ -196,88 +197,89 @@ kw :: CmmToken -> Action
kw tok span buf len = return (L span tok)
global_regN :: (Int -> GlobalReg) -> Action
-global_regN con span buf len
+global_regN con span buf len
= return (L span (CmmT_GlobalReg (con (fromIntegral n))))
where buf' = stepOn buf
- n = parseUnsignedInteger buf' (len-1) 10 octDecDigit
+ n = parseUnsignedInteger buf' (len-1) 10 octDecDigit
global_reg :: GlobalReg -> Action
global_reg r span buf len = return (L span (CmmT_GlobalReg r))
strtoken :: (String -> CmmToken) -> Action
-strtoken f span buf len =
+strtoken f span buf len =
return (L span $! (f $! lexemeToString buf len))
name :: Action
-name span buf len =
+name span buf len =
case lookupUFM reservedWordsFM fs of
- Just tok -> return (L span tok)
- Nothing -> return (L span (CmmT_Name fs))
+ Just tok -> return (L span tok)
+ Nothing -> return (L span (CmmT_Name fs))
where
- fs = lexemeToFastString buf len
+ fs = lexemeToFastString buf len
reservedWordsFM = listToUFM $
- map (\(x, y) -> (mkFastString x, y)) [
- ( "CLOSURE", CmmT_CLOSURE ),
- ( "INFO_TABLE", CmmT_INFO_TABLE ),
- ( "INFO_TABLE_RET", CmmT_INFO_TABLE_RET ),
- ( "INFO_TABLE_FUN", CmmT_INFO_TABLE_FUN ),
- ( "INFO_TABLE_CONSTR", CmmT_INFO_TABLE_CONSTR ),
- ( "INFO_TABLE_SELECTOR",CmmT_INFO_TABLE_SELECTOR ),
- ( "else", CmmT_else ),
- ( "export", CmmT_export ),
- ( "section", CmmT_section ),
- ( "align", CmmT_align ),
- ( "goto", CmmT_goto ),
- ( "if", CmmT_if ),
+ map (\(x, y) -> (mkFastString x, y)) [
+ ( "CLOSURE", CmmT_CLOSURE ),
+ ( "INFO_TABLE", CmmT_INFO_TABLE ),
+ ( "INFO_TABLE_RET", CmmT_INFO_TABLE_RET ),
+ ( "INFO_TABLE_FUN", CmmT_INFO_TABLE_FUN ),
+ ( "INFO_TABLE_CONSTR", CmmT_INFO_TABLE_CONSTR ),
+ ( "INFO_TABLE_SELECTOR",CmmT_INFO_TABLE_SELECTOR ),
+ ( "else", CmmT_else ),
+ ( "export", CmmT_export ),
+ ( "section", CmmT_section ),
+ ( "align", CmmT_align ),
+ ( "goto", CmmT_goto ),
+ ( "if", CmmT_if ),
( "call", CmmT_call ),
( "jump", CmmT_jump ),
( "foreign", CmmT_foreign ),
- ( "never", CmmT_never ),
- ( "prim", CmmT_prim ),
+ ( "never", CmmT_never ),
+ ( "prim", CmmT_prim ),
( "reserve", CmmT_reserve ),
( "return", CmmT_return ),
- ( "returns", CmmT_returns ),
- ( "import", CmmT_import ),
- ( "switch", CmmT_switch ),
- ( "case", CmmT_case ),
+ ( "returns", CmmT_returns ),
+ ( "import", CmmT_import ),
+ ( "switch", CmmT_switch ),
+ ( "case", CmmT_case ),
( "default", CmmT_default ),
( "push", CmmT_push ),
+ ( "unwind", CmmT_unwind ),
( "bits8", CmmT_bits8 ),
- ( "bits16", CmmT_bits16 ),
- ( "bits32", CmmT_bits32 ),
- ( "bits64", CmmT_bits64 ),
- ( "bits128", CmmT_bits128 ),
- ( "bits256", CmmT_bits256 ),
- ( "bits512", CmmT_bits512 ),
- ( "float32", CmmT_float32 ),
- ( "float64", CmmT_float64 ),
+ ( "bits16", CmmT_bits16 ),
+ ( "bits32", CmmT_bits32 ),
+ ( "bits64", CmmT_bits64 ),
+ ( "bits128", CmmT_bits128 ),
+ ( "bits256", CmmT_bits256 ),
+ ( "bits512", CmmT_bits512 ),
+ ( "float32", CmmT_float32 ),
+ ( "float64", CmmT_float64 ),
-- New forms
- ( "b8", CmmT_bits8 ),
- ( "b16", CmmT_bits16 ),
- ( "b32", CmmT_bits32 ),
- ( "b64", CmmT_bits64 ),
- ( "b128", CmmT_bits128 ),
- ( "b256", CmmT_bits256 ),
- ( "b512", CmmT_bits512 ),
- ( "f32", CmmT_float32 ),
- ( "f64", CmmT_float64 ),
- ( "gcptr", CmmT_gcptr )
- ]
-
-tok_decimal span buf len
+ ( "b8", CmmT_bits8 ),
+ ( "b16", CmmT_bits16 ),
+ ( "b32", CmmT_bits32 ),
+ ( "b64", CmmT_bits64 ),
+ ( "b128", CmmT_bits128 ),
+ ( "b256", CmmT_bits256 ),
+ ( "b512", CmmT_bits512 ),
+ ( "f32", CmmT_float32 ),
+ ( "f64", CmmT_float64 ),
+ ( "gcptr", CmmT_gcptr )
+ ]
+
+tok_decimal span buf len
= return (L span (CmmT_Int $! parseUnsignedInteger buf len 10 octDecDigit))
-tok_octal span buf len
+tok_octal span buf len
= return (L span (CmmT_Int $! parseUnsignedInteger (offsetBytes 1 buf) (len-1) 8 octDecDigit))
-tok_hexadecimal span buf len
+tok_hexadecimal span buf len
= return (L span (CmmT_Int $! parseUnsignedInteger (offsetBytes 2 buf) (len-2) 16 hexDigit))
tok_float str = CmmT_Float $! readRational str
tok_string str = CmmT_String (read str)
- -- urk, not quite right, but it'll do for now
+ -- urk, not quite right, but it'll do for now
-- -----------------------------------------------------------------------------
-- Line pragmas
@@ -286,7 +288,7 @@ setLine :: Int -> Action
setLine code span buf len = do
let line = parseUnsignedInteger buf len 10 octDecDigit
setSrcLoc (mkRealSrcLoc (srcSpanFile span) (fromIntegral line - 1) 1)
- -- subtract one: the line number refers to the *following* line
+ -- subtract one: the line number refers to the *following* line
-- trace ("setLine " ++ show line) $ do
popLexState
pushLexState code
@@ -316,17 +318,17 @@ lexToken = do
sc <- getLexState
case alexScan inp sc of
AlexEOF -> do let span = mkRealSrcSpan loc1 loc1
- setLastToken span 0
- return (L span CmmT_EOF)
+ setLastToken span 0
+ return (L span CmmT_EOF)
AlexError (loc2,_) -> do failLocMsgP loc1 loc2 "lexical error"
AlexSkip inp2 _ -> do
- setInput inp2
- lexToken
+ setInput inp2
+ lexToken
AlexToken inp2@(end,buf2) len t -> do
- setInput inp2
- let span = mkRealSrcSpan loc1 end
- span `seq` setLastToken span len
- t span buf len
+ setInput inp2
+ let span = mkRealSrcSpan loc1 end
+ span `seq` setLastToken span len
+ t span buf len
-- -----------------------------------------------------------------------------
-- Monad stuff
@@ -351,7 +353,7 @@ alexGetByte (loc,s)
where c = currentChar s
b = fromIntegral $ ord $ c
loc' = advanceSrcLoc loc c
- s' = stepOn s
+ s' = stepOn s
getInput :: P AlexInput
getInput = P $ \s@PState{ loc=l, buffer=b } -> POk s (l,b)
diff --git a/compiler/cmm/CmmLint.hs b/compiler/cmm/CmmLint.hs
index 970ce6814939..28a8d89ec81f 100644
--- a/compiler/cmm/CmmLint.hs
+++ b/compiler/cmm/CmmLint.hs
@@ -138,6 +138,8 @@ notNodeReg _ = True
lintCmmMiddle :: CmmNode O O -> CmmLint ()
lintCmmMiddle node = case node of
CmmComment _ -> return ()
+ CmmTick _ -> return ()
+ CmmUnwind{} -> return ()
CmmAssign reg expr -> do
dflags <- getDynFlags
diff --git a/compiler/cmm/CmmLive.hs b/compiler/cmm/CmmLive.hs
index 7d674b76a224..dfacd139b605 100644
--- a/compiler/cmm/CmmLive.hs
+++ b/compiler/cmm/CmmLive.hs
@@ -1,7 +1,9 @@
+{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE ScopedTypeVariables #-}
+-- See Note [Deprecations in Hoopl] in Hoopl module
{-# OPTIONS_GHC -fno-warn-warnings-deprecations #-}
module CmmLive
@@ -11,11 +13,9 @@ module CmmLive
, cmmGlobalLiveness
, liveLattice
, noLiveOnEntry, xferLive, gen, kill, gen_kill
- , removeDeadAssignments
)
where
-import UniqSupply
import DynFlags
import BlockId
import Cmm
@@ -98,30 +98,3 @@ xferLive dflags = mkBTransfer3 fst mid lst
mid n f = gen_kill dflags n f
lst :: CmmNode O C -> FactBase (CmmLive r) -> CmmLive r
lst n f = gen_kill dflags n $ joinOutFacts liveLattice n f
-
------------------------------------------------------------------------------
--- Removing assignments to dead variables
------------------------------------------------------------------------------
-
-removeDeadAssignments :: DynFlags -> CmmGraph
- -> UniqSM (CmmGraph, BlockEnv CmmLocalLive)
-removeDeadAssignments dflags g =
- dataflowPassBwd g [] $ analRewBwd liveLattice (xferLive dflags) rewrites
- where rewrites = mkBRewrite3 nothing middle nothing
- -- SDM: no need for deepBwdRw here, we only rewrite to empty
- -- Beware: deepBwdRw with one polymorphic function seems more
- -- reasonable here, but GHC panics while compiling, see bug
- -- #4045.
- middle :: CmmNode O O -> Fact O CmmLocalLive -> CmmReplGraph O O
- middle (CmmAssign (CmmLocal reg') _) live
- | not (reg' `elemRegSet` live)
- = return $ Just emptyGraph
- -- XXX maybe this should be somewhere else...
- middle (CmmAssign lhs (CmmReg rhs)) _ | lhs == rhs
- = return $ Just emptyGraph
- middle (CmmStore lhs (CmmLoad rhs _)) _ | lhs == rhs
- = return $ Just emptyGraph
- middle _ _ = return Nothing
-
- nothing :: CmmNode e x -> Fact x CmmLocalLive -> CmmReplGraph e x
- nothing _ _ = return Nothing
diff --git a/compiler/cmm/CmmMachOp.hs b/compiler/cmm/CmmMachOp.hs
index 684a4b97291b..a7b2c85175de 100644
--- a/compiler/cmm/CmmMachOp.hs
+++ b/compiler/cmm/CmmMachOp.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module CmmMachOp
( MachOp(..)
@@ -18,6 +19,9 @@ module CmmMachOp
-- CallishMachOp
, CallishMachOp(..), callishMachOpHints
, pprCallishMachOp
+
+ -- Atomic read-modify-write
+ , AtomicMachOp(..)
)
where
@@ -545,9 +549,28 @@ data CallishMachOp
| MO_Memmove
| MO_PopCnt Width
+ | MO_Clz Width
+ | MO_Ctz Width
+
| MO_BSwap Width
+
+ -- Atomic read-modify-write.
+ | MO_AtomicRMW Width AtomicMachOp
+ | MO_AtomicRead Width
+ | MO_AtomicWrite Width
+ | MO_Cmpxchg Width
deriving (Eq, Show)
+-- | The operation to perform atomically.
+data AtomicMachOp =
+ AMO_Add
+ | AMO_Sub
+ | AMO_And
+ | AMO_Nand
+ | AMO_Or
+ | AMO_Xor
+ deriving (Eq, Show)
+
pprCallishMachOp :: CallishMachOp -> SDoc
pprCallishMachOp mo = text (show mo)
diff --git a/compiler/cmm/CmmNode.hs b/compiler/cmm/CmmNode.hs
index 5c520d389978..d7a83c41dcd1 100644
--- a/compiler/cmm/CmmNode.hs
+++ b/compiler/cmm/CmmNode.hs
@@ -1,16 +1,22 @@
--- CmmNode type for representation using Hoopl graphs.
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE ExplicitForAll #-}
{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE UndecidableInstances #-}
+-- CmmNode type for representation using Hoopl graphs.
+
module CmmNode (
- CmmNode(..), CmmFormal, CmmActual,
+ CmmNode(..), CmmFormal, CmmActual, CmmTickScope,
UpdFrameOffset, Convention(..),
ForeignConvention(..), ForeignTarget(..), foreignTargetHints,
CmmReturnInfo(..),
mapExp, mapExpDeep, wrapRecExp, foldExp, foldExpDeep, wrapRecExpf,
- mapExpM, mapExpDeepM, wrapRecExpM, mapSuccessors
+ mapExpM, mapExpDeepM, wrapRecExpM, mapSuccessors,
+ combineTickScopes,
) where
import CodeGen.Platform
@@ -19,6 +25,8 @@ import DynFlags
import FastString
import ForeignCall
import SMRep
+import CoreSyn (RawTickish)
+import qualified Unique as U
import Compiler.Hoopl
import Data.Maybe
@@ -32,10 +40,13 @@ import Prelude hiding (succ)
#define ULabel {-# UNPACK #-} !Label
data CmmNode e x where
- CmmEntry :: ULabel -> CmmNode C O
+ CmmEntry :: ULabel -> [CmmTickScope] -> CmmNode C O
CmmComment :: FastString -> CmmNode O O
+ CmmTick :: !RawTickish -> CmmNode O O
+ CmmUnwind :: !GlobalReg -> !CmmExpr -> CmmNode O O
+
CmmAssign :: !CmmReg -> !CmmExpr -> CmmNode O O
-- Assign to register
@@ -201,7 +212,7 @@ deriving instance Eq (CmmNode e x)
-- Hoopl instances of CmmNode
instance NonLocal CmmNode where
- entryLabel (CmmEntry l) = l
+ entryLabel (CmmEntry l _) = l
successors (CmmBranch l) = [l]
successors (CmmCondBranch {cml_true=t, cml_false=f}) = [f, t] -- meets layout constraint
@@ -218,6 +229,8 @@ type CmmFormal = LocalReg
type UpdFrameOffset = ByteOff
+type CmmTickScope = U.Unique
+
-- | A convention maps a list of values (function arguments or return
-- values) to registers or stack locations.
data Convention
@@ -430,8 +443,10 @@ wrapRecExp f (CmmLoad addr ty) = f (CmmLoad (wrapRecExp f addr) ty)
wrapRecExp f e = f e
mapExp :: (CmmExpr -> CmmExpr) -> CmmNode e x -> CmmNode e x
-mapExp _ f@(CmmEntry _) = f
+mapExp _ f@(CmmEntry{}) = f
mapExp _ m@(CmmComment _) = m
+mapExp _ m@(CmmTick _) = m
+mapExp _ m@(CmmUnwind _ _) = m
mapExp f (CmmAssign r e) = CmmAssign r (f e)
mapExp f (CmmStore addr e) = CmmStore (f addr) (f e)
mapExp f (CmmUnsafeForeignCall tgt fs as) = CmmUnsafeForeignCall (mapForeignTarget f tgt) fs (map f as)
@@ -459,8 +474,10 @@ wrapRecExpM f n@(CmmLoad addr ty) = maybe (f n) (f . flip CmmLoad ty) (wrapRecE
wrapRecExpM f e = f e
mapExpM :: (CmmExpr -> Maybe CmmExpr) -> CmmNode e x -> Maybe (CmmNode e x)
-mapExpM _ (CmmEntry _) = Nothing
+mapExpM _ (CmmEntry{}) = Nothing
mapExpM _ (CmmComment _) = Nothing
+mapExpM _ (CmmTick _) = Nothing
+mapExpM _ (CmmUnwind _ _) = Nothing
mapExpM f (CmmAssign r e) = CmmAssign r `fmap` f e
mapExpM f (CmmStore addr e) = (\[addr', e'] -> CmmStore addr' e') `fmap` mapListM f [addr, e]
mapExpM _ (CmmBranch _) = Nothing
@@ -512,6 +529,8 @@ wrapRecExpf f e z = f e z
foldExp :: (CmmExpr -> z -> z) -> CmmNode e x -> z -> z
foldExp _ (CmmEntry {}) z = z
foldExp _ (CmmComment {}) z = z
+foldExp _ (CmmTick {}) z = z
+foldExp _ (CmmUnwind {}) z = z
foldExp f (CmmAssign _ e) z = f e z
foldExp f (CmmStore addr e) z = f addr $ f e z
foldExp f (CmmUnsafeForeignCall t _ as) z = foldr f (foldExpForeignTarget f t z) as
@@ -532,3 +551,11 @@ mapSuccessors f (CmmCondBranch p y n) = CmmCondBranch p (f y) (f n)
mapSuccessors f (CmmSwitch e arms) = CmmSwitch e (map (fmap f) arms)
mapSuccessors _ n = n
+-- -----------------------------------------------------------------------------
+
+combineTickScopes :: [CmmTickScope] -> [CmmTickScope] -> [CmmTickScope]
+combineTickScopes sc0 sc1
+ | l0 > l1 = take (l1 - common) sc1 ++ sc0
+ | otherwise = take (l0 - common) sc0 ++ sc1
+ where l0 = length sc0; l1 = length sc1
+ common = length $ takeWhile id $ zipWith (==) (reverse sc0) (reverse sc1)
diff --git a/compiler/cmm/CmmOpt.hs b/compiler/cmm/CmmOpt.hs
index 54dbbebae67a..84499b97de93 100644
--- a/compiler/cmm/CmmOpt.hs
+++ b/compiler/cmm/CmmOpt.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Cmm optimisation
diff --git a/compiler/cmm/CmmParse.y b/compiler/cmm/CmmParse.y
index 5f2c4d86be48..37c18af711f7 100644
--- a/compiler/cmm/CmmParse.y
+++ b/compiler/cmm/CmmParse.y
@@ -209,7 +209,7 @@ import StgCmmExtCode
import CmmCallConv
import StgCmmProf
import StgCmmHeap
-import StgCmmMonad hiding ( getCode, getCodeR, emitLabel, emit, emitStore
+import StgCmmMonad hiding ( getCode, getCodeR, getCodeScoped, emitLabel, emit, emitStore
, emitAssign, emitOutOfLine, withUpdFrameOff
, getUpdFrameOff )
import qualified StgCmmMonad as F
@@ -220,6 +220,7 @@ import StgCmmClosure
import StgCmmLayout hiding (ArgRep(..))
import StgCmmTicky
import StgCmmBind ( emitBlackHoleCode, emitUpdateFrame )
+import CoreSyn ( Tickish(SourceNote) )
import CmmOpt
import MkGraph
@@ -324,6 +325,7 @@ import Data.Maybe
'case' { L _ (CmmT_case) }
'default' { L _ (CmmT_default) }
'push' { L _ (CmmT_push) }
+ 'unwind' { L _ (CmmT_unwind) }
'bits8' { L _ (CmmT_bits8) }
'bits16' { L _ (CmmT_bits16) }
'bits32' { L _ (CmmT_bits32) }
@@ -428,10 +430,12 @@ lits :: { [CmmParse CmmExpr] }
cmmproc :: { CmmParse () }
: info maybe_conv maybe_formals maybe_body
{ do ((entry_ret_label, info, stk_formals, formals), agraph) <-
- getCodeR $ loopDecls $ do {
+ getCodeScoped $ loopDecls $ do {
(entry_ret_label, info, stk_formals) <- $1;
+ dflags <- getDynFlags;
formals <- sequence (fromMaybe [] $3);
- $4;
+ withName (showSDoc dflags (ppr entry_ret_label))
+ $4;
return (entry_ret_label, info, stk_formals, formals) }
let do_layout = isJust $3
code (emitProcWithStackFrame $2 info
@@ -444,7 +448,7 @@ maybe_conv :: { Convention }
maybe_body :: { CmmParse () }
: ';' { return () }
- | '{' body '}' { $2 }
+ | '{' body '}' { withSourceNote $1 $3 $2 }
info :: { CmmParse (CLabel, Maybe CmmInfoTable, [LocalReg]) }
: NAME
@@ -573,7 +577,7 @@ importName
-- A label imported with an explicit packageId.
| STRING NAME
- { ($2, mkCmmCodeLabel (fsToPackageId (mkFastString $1)) $2) }
+ { ($2, mkCmmCodeLabel (fsToPackageKey (mkFastString $1)) $2) }
names :: { [FastString] }
@@ -626,11 +630,13 @@ stmt :: { CmmParse () }
| 'if' bool_expr 'goto' NAME
{ do l <- lookupLabel $4; cmmRawIf $2 l }
| 'if' bool_expr '{' body '}' else
- { cmmIfThenElse $2 $4 $6 }
+ { cmmIfThenElse $2 (withSourceNote $3 $5 $4) $6 }
| 'push' '(' exprs0 ')' maybe_body
{ pushStackFrame $3 $5 }
| 'reserve' expr '=' lreg maybe_body
{ reserveStackFrame $2 $4 $5 }
+ | 'unwind' GLOBALREG '=' expr
+ { $4 >>= code . emitUnwind $2 }
foreignLabel :: { CmmParse CmmExpr }
: NAME { return (CmmLit (CmmLabel (mkForeignLabel $1 Nothing ForeignLabelInThisPackage IsFunction))) }
@@ -679,7 +685,7 @@ arm :: { CmmParse ([Int],Either BlockId (CmmParse ())) }
: 'case' ints ':' arm_body { do b <- $4; return ($2, b) }
arm_body :: { CmmParse (Either BlockId (CmmParse ())) }
- : '{' body '}' { return (Right $2) }
+ : '{' body '}' { return (Right (withSourceNote $1 $3 $2)) }
| 'goto' NAME ';' { do l <- lookupLabel $2; return (Left l) }
ints :: { [Int] }
@@ -687,7 +693,7 @@ ints :: { [Int] }
| INT ',' ints { fromIntegral $1 : $3 }
default :: { Maybe (CmmParse ()) }
- : 'default' ':' '{' body '}' { Just $4 }
+ : 'default' ':' '{' body '}' { Just (withSourceNote $3 $5 $4) }
-- taking a few liberties with the C-- syntax here; C-- doesn't have
-- 'default' branches
| {- empty -} { Nothing }
@@ -696,7 +702,7 @@ default :: { Maybe (CmmParse ()) }
-- CmmNode does.
else :: { CmmParse () }
: {- empty -} { return () }
- | 'else' '{' body '}' { $3 }
+ | 'else' '{' body '}' { withSourceNote $2 $4 $3 }
-- we have to write this out longhand so that Happy's precedence rules
-- can kick in.
@@ -1101,7 +1107,7 @@ profilingInfo dflags desc_str ty_str
else ProfilingInfo (stringToWord8s desc_str)
(stringToWord8s ty_str)
-staticClosure :: PackageId -> FastString -> FastString -> [CmmLit] -> CmmParse ()
+staticClosure :: PackageKey -> FastString -> FastString -> [CmmLit] -> CmmParse ()
staticClosure pkg cl_label info payload
= do dflags <- getDynFlags
let lits = mkStaticClosure dflags (mkCmmInfoLabel pkg info) dontCareCCS payload [] [] []
@@ -1275,6 +1281,15 @@ emitCond (e1 `BoolAnd` e2) then_id = do
emitCond e2 then_id
emitLabel else_id
+-- -----------------------------------------------------------------------------
+-- Source code notes
+
+withSourceNote :: Located a -> Located b -> CmmParse c -> CmmParse c
+withSourceNote a b parse = do
+ name <- getName
+ case combineSrcSpans (getLoc a) (getLoc b) of
+ RealSrcSpan span -> code (emitTick (SourceNote span name)) >> parse
+ _other -> parse
-- -----------------------------------------------------------------------------
-- Table jumps
@@ -1321,7 +1336,7 @@ doSwitch mb_range scrut arms deflt
forkLabelledCode :: CmmParse () -> CmmParse BlockId
forkLabelledCode p = do
- ag <- getCode p
+ (_,ag) <- getCodeScoped p
l <- newBlockId
emitOutOfLine l ag
return l
@@ -1334,7 +1349,7 @@ forkLabelledCode p = do
initEnv :: DynFlags -> Env
initEnv dflags = listToUFM [
( fsLit "SIZEOF_StgHeader",
- VarN (CmmLit (CmmInt (fromIntegral (fixedHdrSize dflags * wORD_SIZE dflags)) (wordWidth dflags)) )),
+ VarN (CmmLit (CmmInt (fromIntegral (fixedHdrSize dflags)) (wordWidth dflags)) )),
( fsLit "SIZEOF_StgInfoTable",
VarN (CmmLit (CmmInt (fromIntegral (stdInfoTableSizeB dflags)) (wordWidth dflags)) ))
]
@@ -1354,7 +1369,8 @@ parseCmmFile dflags filename = do
return ((emptyBag, unitBag msg), Nothing)
POk pst code -> do
st <- initC
- let (cmm,_) = runC dflags no_module st (getCmm (unEC code (initEnv dflags) [] >> return ()))
+ let fcode = getCmm $ unEC code "global" (initEnv dflags) [] >> return ()
+ (cmm,_) = runC dflags no_module st fcode
let ms = getMessages pst
if (errorsFound dflags ms)
then return (ms, Nothing)
diff --git a/compiler/cmm/CmmPipeline.hs b/compiler/cmm/CmmPipeline.hs
index 5c2d54d5ba48..af4f62a4a8d4 100644
--- a/compiler/cmm/CmmPipeline.hs
+++ b/compiler/cmm/CmmPipeline.hs
@@ -1,6 +1,4 @@
-{-# LANGUAGE NoMonoLocalBinds #-}
--- Norman likes local bindings
--- If this module lives on I'd like to get rid of this extension in due course
+{-# LANGUAGE BangPatterns #-}
module CmmPipeline (
-- | Converts C-- with an implicit stack and native C-- calls into
@@ -40,8 +38,6 @@ cmmPipeline :: HscEnv -- Compilation env including
cmmPipeline hsc_env topSRT prog =
do let dflags = hsc_dflags hsc_env
- showPass dflags "CPSZ"
-
tops <- {-# SCC "tops" #-} mapM (cpsTop hsc_env) prog
(topSRT, cmms) <- {-# SCC "doSRTs" #-} doSRTs dflags topSRT tops
@@ -88,10 +84,6 @@ cpsTop hsc_env proc =
else
return call_pps
- let noncall_pps = proc_points `setDifference` call_pps
- when (not (setNull noncall_pps) && dopt Opt_D_dump_cmm dflags) $
- pprTrace "Non-call proc points: " (ppr noncall_pps) $ return ()
-
----------- Layout the stack and manifest Sp ----------------------------
(g, stackmaps) <-
{-# SCC "layoutStack" #-}
@@ -109,57 +101,40 @@ cpsTop hsc_env proc =
let cafEnv = {-# SCC "cafAnal" #-} cafAnal g
dumpIfSet_dyn dflags Opt_D_dump_cmm "CAFEnv" (ppr cafEnv)
- if splitting_proc_points
- then do
- ------------- Split into separate procedures -----------------------
- pp_map <- {-# SCC "procPointAnalysis" #-} runUniqSM $
- procPointAnalysis proc_points g
- dumpWith dflags Opt_D_dump_cmm_procmap "procpoint map" pp_map
- gs <- {-# SCC "splitAtProcPoints" #-} runUniqSM $
- splitAtProcPoints dflags l call_pps proc_points pp_map
- (CmmProc h l v g)
- dumps Opt_D_dump_cmm_split "Post splitting" gs
-
- ------------- Populate info tables with stack info -----------------
- gs <- {-# SCC "setInfoTableStackMap" #-}
- return $ map (setInfoTableStackMap dflags stackmaps) gs
- dumps Opt_D_dump_cmm_info "after setInfoTableStackMap" gs
-
- ----------- Control-flow optimisations -----------------------------
- gs <- {-# SCC "cmmCfgOpts(2)" #-}
- return $ if optLevel dflags >= 1
- then map (cmmCfgOptsProc splitting_proc_points) gs
- else gs
- gs <- return (map removeUnreachableBlocksProc gs)
- -- Note [unreachable blocks]
- dumps Opt_D_dump_cmm_cfg "Post control-flow optimisations" gs
-
- return (cafEnv, gs)
-
- else do
- -- attach info tables to return points
- g <- return $ attachContInfoTables call_pps (CmmProc h l v g)
-
- ------------- Populate info tables with stack info -----------------
- g <- {-# SCC "setInfoTableStackMap" #-}
- return $ setInfoTableStackMap dflags stackmaps g
- dump' Opt_D_dump_cmm_info "after setInfoTableStackMap" g
-
- ----------- Control-flow optimisations -----------------------------
- g <- {-# SCC "cmmCfgOpts(2)" #-}
- return $ if optLevel dflags >= 1
- then cmmCfgOptsProc splitting_proc_points g
- else g
- g <- return (removeUnreachableBlocksProc g)
- -- Note [unreachable blocks]
- dump' Opt_D_dump_cmm_cfg "Post control-flow optimisations" g
-
- return (cafEnv, [g])
+ g <- if splitting_proc_points
+ then do
+ ------------- Split into separate procedures -----------------------
+ pp_map <- {-# SCC "procPointAnalysis" #-} runUniqSM $
+ procPointAnalysis proc_points g
+ dumpWith dflags Opt_D_dump_cmm_procmap "procpoint map" pp_map
+ g <- {-# SCC "splitAtProcPoints" #-} runUniqSM $
+ splitAtProcPoints dflags l call_pps proc_points pp_map
+ (CmmProc h l v g)
+ dumps Opt_D_dump_cmm_split "Post splitting" g
+ return g
+ else do
+ -- attach info tables to return points
+ return $ [attachContInfoTables call_pps (CmmProc h l v g)]
+
+ ------------- Populate info tables with stack info -----------------
+ g <- {-# SCC "setInfoTableStackMap" #-}
+ return $ map (setInfoTableStackMap dflags stackmaps) g
+ dumps Opt_D_dump_cmm_info "after setInfoTableStackMap" g
+
+ ----------- Control-flow optimisations -----------------------------
+ g <- {-# SCC "cmmCfgOpts(2)" #-}
+ return $ if optLevel dflags >= 1
+ then map (cmmCfgOptsProc splitting_proc_points) g
+ else g
+ g <- return (map removeUnreachableBlocksProc g)
+ -- See Note [unreachable blocks]
+ dumps Opt_D_dump_cmm_cfg "Post control-flow optimisations" g
+
+ return (cafEnv, g)
where dflags = hsc_dflags hsc_env
platform = targetPlatform dflags
dump = dumpGraph dflags
- dump' = dumpWith dflags
dumps flag name
= mapM_ (dumpWith dflags flag name)
@@ -351,10 +326,9 @@ _GLOBAL_OFFSET_TABLE_, regardless of which entry point we arrived via.
{- Note [unreachable blocks]
The control-flow optimiser sometimes leaves unreachable blocks behind
-containing junk code. If these blocks make it into the native code
-generator then they trigger a register allocator panic because they
-refer to undefined LocalRegs, so we must eliminate any unreachable
-blocks before passing the code onwards.
+containing junk code. These aren't necessarily a problem, but
+removing them is good because it might save time in the native code
+generator later.
-}
@@ -383,4 +357,3 @@ dumpWith dflags flag txt g = do
dumpIfSet_dyn dflags flag txt (ppr g)
when (not (dopt flag dflags)) $
dumpIfSet_dyn dflags Opt_D_dump_cmm txt (ppr g)
-
diff --git a/compiler/cmm/CmmProcPoint.hs b/compiler/cmm/CmmProcPoint.hs
index 5f9c27fe7a28..3b5220e805da 100644
--- a/compiler/cmm/CmmProcPoint.hs
+++ b/compiler/cmm/CmmProcPoint.hs
@@ -145,7 +145,7 @@ forward :: FwdTransfer CmmNode Status
forward = mkFTransfer3 first middle last
where
first :: CmmNode C O -> Status -> Status
- first (CmmEntry id) ProcPoint = ReachedBy $ setSingleton id
+ first (CmmEntry id _) ProcPoint = ReachedBy $ setSingleton id
first _ x = x
middle _ x = x
@@ -282,7 +282,7 @@ splitAtProcPoints dflags entry_label callPPs procPoints procMap
-- and replace branches to procpoints with branches to the jump-off blocks
let add_jump_block (env, bs) (pp, l) =
do bid <- liftM mkBlockId getUniqueM
- let b = blockJoin (CmmEntry bid) emptyBlock jump
+ let b = blockJoin (CmmEntry bid []) emptyBlock jump
live = ppLiveness pp
jump = CmmCall (CmmLit (CmmLabel l)) Nothing live 0 0 0
return (mapInsert pp bid env, b : bs)
diff --git a/compiler/cmm/CmmRewriteAssignments.hs b/compiler/cmm/CmmRewriteAssignments.hs
index 3c0a05b949dd..621d90dbbb8c 100644
--- a/compiler/cmm/CmmRewriteAssignments.hs
+++ b/compiler/cmm/CmmRewriteAssignments.hs
@@ -381,6 +381,10 @@ middleAssignment dflags (Plain n@(CmmUnsafeForeignCall{})) assign
middleAssignment _ (Plain (CmmComment {})) assign
= assign
+middleAssignment _ (Plain (CmmTick {})) assign
+ = assign
+middleAssignment _ (Plain (CmmUnwind {})) assign
+ = assign
-- Assumptions:
-- * Writes using Hp do not overlap with any other memory locations
diff --git a/compiler/cmm/CmmSink.hs b/compiler/cmm/CmmSink.hs
index c404a2e93244..4dced9afd229 100644
--- a/compiler/cmm/CmmSink.hs
+++ b/compiler/cmm/CmmSink.hs
@@ -9,6 +9,7 @@ import BlockId
import CmmLive
import CmmUtils
import Hoopl
+import CodeGen.Platform
import DynFlags
import UniqFM
@@ -16,6 +17,7 @@ import PprCmm ()
import Data.List (partition)
import qualified Data.Set as Set
+import Data.Maybe
-- -----------------------------------------------------------------------------
-- Sinking and inlining
@@ -197,7 +199,7 @@ cmmSink dflags graph = ofBlockList (g_entry graph) $ sink mapEmpty $ blocks
drop_if a@(r,rhs,_) live_sets = (should_drop, live_sets')
where
should_drop = conflicts dflags a final_last
- || not (isTrivial rhs) && live_in_multi live_sets r
+ || not (isTrivial dflags rhs) && live_in_multi live_sets r
|| r `Set.member` live_in_joins
live_sets' | should_drop = live_sets
@@ -219,26 +221,24 @@ cmmSink dflags graph = ofBlockList (g_entry graph) $ sink mapEmpty $ blocks
-- small: an expression we don't mind duplicating
isSmall :: CmmExpr -> Bool
-isSmall (CmmReg (CmmLocal _)) = True -- not globals, we want to coalesce them instead* See below
+isSmall (CmmReg (CmmLocal _)) = True --
isSmall (CmmLit _) = True
isSmall (CmmMachOp (MO_Add _) [x,y]) = isTrivial x && isTrivial y
isSmall (CmmRegOff (CmmLocal _) _) = True
isSmall _ = False
-
-Coalesce global registers? What does that mean? We observed no decrease
-in performance comming from inlining of global registers, hence we do it now
-(see isTrivial function). Ideally we'd like to measure performance using
-some tool like perf or VTune and make decisions what to inline based on that.
-}
--
-- We allow duplication of trivial expressions: registers (both local and
-- global) and literals.
--
-isTrivial :: CmmExpr -> Bool
-isTrivial (CmmReg _) = True
-isTrivial (CmmLit _) = True
-isTrivial _ = False
+isTrivial :: DynFlags -> CmmExpr -> Bool
+isTrivial _ (CmmReg (CmmLocal _)) = True
+isTrivial dflags (CmmReg (CmmGlobal r)) = -- see Note [Inline GlobalRegs?]
+ isJust (globalRegMaybe (targetPlatform dflags) r)
+ -- GlobalRegs that are loads from BaseReg are not trivial
+isTrivial _ (CmmLit _) = True
+isTrivial _ _ = False
--
-- annotate each node with the set of registers live *after* the node
@@ -401,7 +401,7 @@ tryToInline dflags live node assigs = go usages node [] assigs
| cannot_inline = dont_inline
| occurs_none = discard -- Note [discard during inlining]
| occurs_once = inline_and_discard
- | isTrivial rhs = inline_and_keep
+ | isTrivial dflags rhs = inline_and_keep
| otherwise = dont_inline
where
inline_and_discard = go usages' inl_node skipped rest
@@ -516,7 +516,7 @@ conflicts :: DynFlags -> Assignment -> CmmNode O x -> Bool
conflicts dflags (r, rhs, addr) node
-- (1) node defines registers used by rhs of assignment. This catches
- -- assignmnets and all three kinds of calls. See Note [Sinking and calls]
+ -- assignments and all three kinds of calls. See Note [Sinking and calls]
| globalRegistersConflict dflags rhs node = True
| localRegistersConflict dflags rhs node = True
@@ -650,6 +650,10 @@ data AbsMem
-- perhaps we ought to have a special annotation for calls that can
-- modify heap/stack memory. For now we just use the conservative
-- definition here.
+--
+-- Some CallishMachOp imply a memory barrier e.g. AtomicRMW and
+-- therefore we should never float any memory operations across one of
+-- these calls.
bothMems :: AbsMem -> AbsMem -> AbsMem
@@ -695,3 +699,91 @@ regAddr _ (CmmGlobal Hp) _ _ = HeapMem
regAddr _ (CmmGlobal CurrentTSO) _ _ = HeapMem -- important for PrimOps
regAddr dflags r _ _ | isGcPtrType (cmmRegType dflags r) = HeapMem -- yay! GCPtr pays for itself
regAddr _ _ _ _ = AnyMem
+
+{-
+Note [Inline GlobalRegs?]
+
+Should we freely inline GlobalRegs?
+
+Actually it doesn't make a huge amount of difference either way, so we
+*do* currently treat GlobalRegs as "trivial" and inline them
+everywhere, but for what it's worth, here is what I discovered when I
+(SimonM) looked into this:
+
+Common sense says we should not inline GlobalRegs, because when we
+have
+
+ x = R1
+
+the register allocator will coalesce this assignment, generating no
+code, and simply record the fact that x is bound to $rbx (or
+whatever). Furthermore, if we were to sink this assignment, then the
+range of code over which R1 is live increases, and the range of code
+over which x is live decreases. All things being equal, it is better
+for x to be live than R1, because R1 is a fixed register whereas x can
+live in any register. So we should neither sink nor inline 'x = R1'.
+
+However, not inlining GlobalRegs can have surprising
+consequences. e.g. (cgrun020)
+
+ c3EN:
+ _s3DB::P64 = R1;
+ _c3ES::P64 = _s3DB::P64 & 7;
+ if (_c3ES::P64 >= 2) goto c3EU; else goto c3EV;
+ c3EU:
+ _s3DD::P64 = P64[_s3DB::P64 + 6];
+ _s3DE::P64 = P64[_s3DB::P64 + 14];
+ I64[Sp - 8] = c3F0;
+ R1 = _s3DE::P64;
+ P64[Sp] = _s3DD::P64;
+
+inlining the GlobalReg gives:
+
+ c3EN:
+ if (R1 & 7 >= 2) goto c3EU; else goto c3EV;
+ c3EU:
+ I64[Sp - 8] = c3F0;
+ _s3DD::P64 = P64[R1 + 6];
+ R1 = P64[R1 + 14];
+ P64[Sp] = _s3DD::P64;
+
+but if we don't inline the GlobalReg, instead we get:
+
+ _s3DB::P64 = R1;
+ if (_s3DB::P64 & 7 >= 2) goto c3EU; else goto c3EV;
+ c3EU:
+ I64[Sp - 8] = c3F0;
+ R1 = P64[_s3DB::P64 + 14];
+ P64[Sp] = P64[_s3DB::P64 + 6];
+
+This looks better - we managed to inline _s3DD - but in fact it
+generates an extra reg-reg move:
+
+.Lc3EU:
+ movq $c3F0_info,-8(%rbp)
+ movq %rbx,%rax
+ movq 14(%rbx),%rbx
+ movq 6(%rax),%rax
+ movq %rax,(%rbp)
+
+because _s3DB is now live across the R1 assignment, we lost the
+benefit of coalescing.
+
+Who is at fault here? Perhaps if we knew that _s3DB was an alias for
+R1, then we would not sink a reference to _s3DB past the R1
+assignment. Or perhaps we *should* do that - we might gain by sinking
+it, despite losing the coalescing opportunity.
+
+Sometimes not inlining global registers wins by virtue of the rule
+about not inlining into arguments of a foreign call, e.g. (T7163) this
+is what happens when we inlined F1:
+
+ _s3L2::F32 = F1;
+ _c3O3::F32 = %MO_F_Mul_W32(F1, 10.0 :: W32);
+ (_s3L7::F32) = call "ccall" arg hints: [] result hints: [] rintFloat(_c3O3::F32);
+
+but if we don't inline F1:
+
+ (_s3L7::F32) = call "ccall" arg hints: [] result hints: [] rintFloat(%MO_F_Mul_W32(_s3L2::F32,
+ 10.0 :: W32));
+-}
diff --git a/compiler/cmm/CmmType.hs b/compiler/cmm/CmmType.hs
index d03c2dc0b957..37d92c207d4f 100644
--- a/compiler/cmm/CmmType.hs
+++ b/compiler/cmm/CmmType.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module CmmType
( CmmType -- Abstract
diff --git a/compiler/cmm/CmmUtils.hs b/compiler/cmm/CmmUtils.hs
index f6d1ddde58cd..308cb751fd01 100644
--- a/compiler/cmm/CmmUtils.hs
+++ b/compiler/cmm/CmmUtils.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE GADTs #-}
+{-# LANGUAGE CPP, GADTs, RankNTypes #-}
-----------------------------------------------------------------------------
--
@@ -55,7 +55,10 @@ module CmmUtils(
analFwd, analBwd, analRewFwd, analRewBwd,
dataflowPassFwd, dataflowPassBwd, dataflowAnalFwd, dataflowAnalBwd,
- dataflowAnalFwdBlocks
+ dataflowAnalFwdBlocks,
+
+ -- * Ticks
+ blockTicks
) where
#include "HsVersions.h"
@@ -72,6 +75,7 @@ import Unique
import UniqSupply
import DynFlags
import Util
+import CoreSyn ( RawTickish )
import Data.Word
import Data.Maybe
@@ -286,22 +290,23 @@ cmmOffsetLitB = cmmOffsetLit
-----------------------
-- The "W" variants take word offsets
+
cmmOffsetExprW :: DynFlags -> CmmExpr -> CmmExpr -> CmmExpr
-- The second arg is a *word* offset; need to change it to bytes
cmmOffsetExprW dflags e (CmmLit (CmmInt n _)) = cmmOffsetW dflags e (fromInteger n)
cmmOffsetExprW dflags e wd_off = cmmIndexExpr dflags (wordWidth dflags) e wd_off
cmmOffsetW :: DynFlags -> CmmExpr -> WordOff -> CmmExpr
-cmmOffsetW dflags e n = cmmOffsetB dflags e (wORD_SIZE dflags * n)
+cmmOffsetW dflags e n = cmmOffsetB dflags e (wordsToBytes dflags n)
cmmRegOffW :: DynFlags -> CmmReg -> WordOff -> CmmExpr
-cmmRegOffW dflags reg wd_off = cmmRegOffB reg (wd_off * wORD_SIZE dflags)
+cmmRegOffW dflags reg wd_off = cmmRegOffB reg (wordsToBytes dflags wd_off)
cmmOffsetLitW :: DynFlags -> CmmLit -> WordOff -> CmmLit
-cmmOffsetLitW dflags lit wd_off = cmmOffsetLitB lit (wORD_SIZE dflags * wd_off)
+cmmOffsetLitW dflags lit wd_off = cmmOffsetLitB lit (wordsToBytes dflags wd_off)
cmmLabelOffW :: DynFlags -> CLabel -> WordOff -> CmmLit
-cmmLabelOffW dflags lbl wd_off = cmmLabelOffB lbl (wORD_SIZE dflags * wd_off)
+cmmLabelOffW dflags lbl wd_off = cmmLabelOffB lbl (wordsToBytes dflags wd_off)
cmmLoadIndexW :: DynFlags -> CmmExpr -> Int -> CmmType -> CmmExpr
cmmLoadIndexW dflags base off ty = CmmLoad (cmmOffsetW dflags base off) ty
@@ -492,7 +497,7 @@ mapGraphNodes :: ( CmmNode C O -> CmmNode C O
, CmmNode O C -> CmmNode O C)
-> CmmGraph -> CmmGraph
mapGraphNodes funs@(mf,_,_) g =
- ofBlockMap (entryLabel $ mf $ CmmEntry $ g_entry g) $ mapMap (mapBlock3' funs) $ toBlockMap g
+ ofBlockMap (entryLabel $ mf $ CmmEntry (g_entry g) []) $ mapMap (mapBlock3' funs) $ toBlockMap g
mapGraphNodes1 :: (forall e x. CmmNode e x -> CmmNode e x) -> CmmGraph -> CmmGraph
mapGraphNodes1 f = modifyGraph (mapGraph f)
@@ -566,3 +571,13 @@ dataflowPassBwd :: NonLocal n =>
dataflowPassBwd (CmmGraph {g_entry=entry, g_graph=graph}) facts bwd = do
(graph, facts, NothingO) <- analyzeAndRewriteBwd bwd (JustC [entry]) graph (mkFactBase (bp_lattice bwd) facts)
return (CmmGraph {g_entry=entry, g_graph=graph}, facts)
+
+-------------------------------------------------
+-- Tick utilities
+
+blockTicks :: Block CmmNode C C -> [RawTickish]
+blockTicks b = reverse $ foldBlockNodesF goStmt b []
+ where goStmt :: CmmNode e x -> [RawTickish] -> [RawTickish]
+ goStmt (CmmTick t) ts = t:ts
+ goStmt _other ts = ts
+
diff --git a/compiler/cmm/Debug.hs b/compiler/cmm/Debug.hs
new file mode 100644
index 000000000000..175966a8bb2f
--- /dev/null
+++ b/compiler/cmm/Debug.hs
@@ -0,0 +1,498 @@
+
+{-# LANGUAGE GADTs, CPP #-}
+
+-----------------------------------------------------------------------------
+--
+-- Debugging data
+--
+-- Association of debug data on the Cmm level, with methods to encode it in
+-- event log format for later inclusion in profiling event logs.
+--
+-----------------------------------------------------------------------------
+
+module Debug (
+
+ DebugBlock(..), dblIsEntry,
+ UnwindTable, UnwindExpr(..),
+ cmmDebugGen,
+ cmmDebugLink,
+ debugToMap,
+ writeDebugToEventlog
+
+ ) where
+
+import Binary
+import BlockId ( blockLbl )
+import CLabel
+import Cmm
+import CmmUtils
+import CoreSyn
+import DynFlags
+import FastString ( nilFS, mkFastString, unpackFS )
+import Module
+import Outputable
+import PprCore ()
+import PprCmmExpr ( pprExpr )
+import SrcLoc
+import UniqFM
+import Util
+import Var ( Var, varType )
+
+import Compiler.Hoopl
+
+import Control.Monad ( foldM, forM_, void, when, ap )
+import Control.Applicative ( Applicative(..) )
+
+import Data.Char ( ord)
+import Data.Maybe
+import Data.List ( find, minimumBy )
+import Data.Ord ( comparing )
+import qualified Data.Map as Map
+import Data.Word ( Word8, Word16 )
+
+import Foreign.ForeignPtr
+
+#define EVENTLOG_CONSTANTS_ONLY
+#include "../../includes/rts/EventLogFormat.h"
+
+-- | Debug information about a block of code. Context is encoded through nesting.
+data DebugBlock =
+ DebugBlock
+ { dblProcedure :: !Label -- ^ Entry label of containing procedure
+ , dblLabel :: !Label -- ^ Hoopl label
+ , dblCLabel :: !CLabel -- ^ Output label
+ , dblHasInfoTbl :: !Bool -- ^ Has an info table?
+ , dblTicks :: ![RawTickish] -- ^ Ticks defined in this block
+ , dblSourceTick :: !(Maybe RawTickish) -- ^ Best source tick covering this block
+ , dblPosition :: !(Maybe Int) -- ^ Output position relative to other blocks in proc.
+ -- @Nothing@ means the block has been optimized out.
+ , dblUnwind :: !UnwindTable -- ^ Unwind information
+ , dblBlocks :: ![DebugBlock] -- ^ Nested blocks
+ }
+
+dblIsEntry :: DebugBlock -> Bool
+dblIsEntry blk = dblProcedure blk == dblLabel blk
+
+instance Outputable DebugBlock where
+ ppr blk = (if dblProcedure blk == dblLabel blk
+ then text "proc "
+ else if dblHasInfoTbl blk
+ then text "pp-blk "
+ else text "blk ") <>
+ ppr (dblLabel blk) <+> parens (ppr (dblCLabel blk)) <+>
+ (maybe empty ppr (dblSourceTick blk)) <+>
+ (maybe empty ppr (find isCore (dblTicks blk))) <+>
+ (maybe (text "removed") ((text "pos " <>) . ppr) (dblPosition blk)) <+>
+ pprUwMap (dblUnwind blk) $$
+ (if null (dblBlocks blk) then empty else ppr (dblBlocks blk))
+ where isCore CoreNote{} = True
+ isCore _other = False
+ pprUw (g, expr) = ppr g <> char '=' <> ppr expr
+ pprUwMap = braces . hsep . punctuate comma . map pprUw . Map.toList
+
+type BlockContext = (CmmBlock, RawCmmDecl, UnwindTable)
+
+-- | Extract debug data from a group of procedures
+cmmDebugGen :: ModLocation -> RawCmmGroup -> [DebugBlock]
+cmmDebugGen modLoc decls = map (blocksForScope Nothing) topScopes
+ where
+ blockCtxs = blockContexts decls
+
+ -- Analyse tick scope structure: Each one is either a top-level
+ -- tick scope, or the child of another. Note that we are using
+ -- "postfix" instead of "subset" relation here, implicitly
+ -- reducing the graph to a tree.
+ (topScopes, childScopes)
+ = splitEithers $ map (\a -> findP a a) $ Map.keys blockCtxs
+ findP tsc [] = Left tsc
+ findP tsc (_:sc) | sc `Map.member` blockCtxs = Right (sc, tsc)
+ | otherwise = findP tsc sc
+ scopeMap = foldr (uncurry insertMulti) Map.empty childScopes
+
+ -- Finding the "best" source tick is somewhat arbitrary -- we
+ -- select the first source span, while preferring source ticks
+ -- from the same source file. Furthermore, dumps take priority
+ -- (if we generated one, we probably want debug information to
+ -- refer to it).
+ bestSrcTick = minimumBy (comparing rangeRating)
+ rangeRating (SourceNote span _)
+ | isDumpSrcSpan span = 0
+ | srcSpanFile span == thisFile = 1
+ | otherwise = 2 :: Int
+ rangeRating _non_source_note = error "rangeRating"
+ thisFile = maybe nilFS mkFastString $ ml_hs_file modLoc
+
+ -- Returns block tree for this scope as well as all nested
+ -- scopes. Note that if there are multiple blocks in the (exact)
+ -- same scope we elect one as the "branch" node and add the rest
+ -- as children.
+ blocksForScope cstick scope = mkBlock True (head bctxs)
+ where bctxs = fromJust $ Map.lookup scope blockCtxs
+ nested = fromMaybe [] $ Map.lookup scope scopeMap
+ childs = map (mkBlock False) (tail bctxs) ++
+ map (blocksForScope stick) nested
+ mkBlock top (block, prc, unwind)
+ = DebugBlock { dblProcedure = g_entry graph
+ , dblLabel = label
+ , dblCLabel = case info of
+ Just (Statics infoLbl _) -> infoLbl
+ Nothing
+ | g_entry graph == label -> entryLbl
+ | otherwise -> blockLbl label
+ , dblHasInfoTbl = isJust info
+ , dblTicks = if top then ticks else []
+ , dblPosition = Nothing -- updated in cmmDebugLink
+ , dblUnwind = unwind
+ , dblSourceTick = stick
+ , dblBlocks = if top then childs else []
+ }
+ where (CmmProc infos entryLbl _ graph) = prc
+ label = entryLabel block
+ info = mapLookup label infos
+
+ -- A source tick scopes over all nested blocks. However
+ -- their source ticks might take priority.
+ isSourceTick SourceNote {} = True
+ isSourceTick _ = False
+ ticks = concatMap (blockTicks . fstOf3) bctxs
+ stick = case filter isSourceTick ticks of
+ [] -> cstick
+ sticks -> Just $! bestSrcTick (sticks ++ maybeToList cstick)
+
+-- | Build a map of blocks sorted by their tick scopes
+--
+-- This involves a pre-order traversal, as we want blocks in rough
+-- control flow order (so ticks have a chance to be sorted in the
+-- right order). We also use this opportunity to have blocks inherit
+-- unwind information from their predecessor blocks where it is
+-- lacking.
+blockContexts :: RawCmmGroup -> Map.Map [CmmTickScope] [BlockContext]
+blockContexts decls = Map.map reverse $ foldr walkProc Map.empty decls
+ where walkProc CmmData{} m = m
+ walkProc prc@(CmmProc _ _ _ graph) m
+ | mapNull blocks = m
+ | otherwise = snd $ walkBlock prc entry Map.empty (emptyLbls, m)
+ where blocks = toBlockMap graph
+ entry = [mapFind (g_entry graph) blocks]
+ emptyLbls = setEmpty :: LabelSet
+ walkBlock _ [] _ c = c
+ walkBlock prc (block:blocks) unwind (visited, m)
+ | lbl `setMember` visited
+ = walkBlock prc blocks unwind (visited, m)
+ | otherwise
+ = walkBlock prc blocks unwind $
+ walkBlock prc succs unwind' (lbl `setInsert` visited,
+ insertMulti scope (block, prc, unwind') m)
+ where CmmEntry lbl scope = firstNode block
+ unwind' = extractUnwind block `Map.union` unwind
+ (CmmProc _ _ _ graph) = prc
+ succs = map (flip mapFind (toBlockMap graph))
+ (successors (lastNode block))
+ mapFind = mapFindWithDefault (error "contextTree: block not found!")
+
+insertMulti :: Ord k => k -> a -> Map.Map k [a] -> Map.Map k [a]
+insertMulti k v = Map.insertWith (const (v:)) k [v]
+
+-- | Sets position fields in the debug block tree according to native
+-- generated code.
+cmmDebugLink :: (i -> Bool) -> GenCmmGroup d g (ListGraph i)
+ -> [DebugBlock] -> [DebugBlock]
+cmmDebugLink isMeta nats blocks =
+ let -- Find order in which procedures will be generated by the
+ -- back-end (that actually matters for DWARF generation).
+ --
+ -- Note that we might encounter blocks that are missing or only
+ -- consist of meta instructions -- we will declare them missing,
+ -- which will skip debug data generation without messing up the
+ -- block hierarchy.
+ getBlocks (CmmProc _ _ _ (ListGraph bs)) = bs
+ getBlocks _other = []
+ allMeta (BasicBlock _ instrs) = all isMeta instrs
+ blockPosition :: LabelMap Int
+ blockPosition = mapFromList $ flip zip [0..] $ map blockId $ filter (not . allMeta) $
+ concatMap getBlocks nats
+
+ link block = block { dblPosition = mapLookup (dblLabel block) blockPosition
+ , dblBlocks = map link (dblBlocks block)
+ }
+ in map link blocks
+
+-- | Converts debug blocks into a label map for easier lookups
+debugToMap :: [DebugBlock] -> LabelMap DebugBlock
+debugToMap = mapUnions . map go
+ where go b = mapInsert (dblLabel b) b $ mapUnions $ map go (dblBlocks b)
+
+-- | Maps registers to expressions that yield their "old" values
+-- further up the stack. Most interesting for the stack pointer Sp,
+-- but might be useful to document saved registers, too.
+type UnwindTable = Map.Map GlobalReg UnwindExpr
+
+-- | Expressions, used for unwind information
+data UnwindExpr = UwConst Int -- ^ literal value
+ | UwReg GlobalReg Int -- ^ register plus offset
+ | UwDeref UnwindExpr -- ^ pointer dereferencing
+ | UwPlus UnwindExpr UnwindExpr
+ | UwMinus UnwindExpr UnwindExpr
+ | UwTimes UnwindExpr UnwindExpr
+ deriving (Eq)
+
+instance Outputable UnwindExpr where
+ pprPrec _ (UwConst i) = ppr i
+ pprPrec _ (UwReg g 0) = ppr g
+ pprPrec p (UwReg g x) = pprPrec p (UwPlus (UwReg g 0) (UwConst x))
+ pprPrec _ (UwDeref e) = char '*' <> pprPrec 3 e
+ pprPrec p (UwPlus e0 e1) | p <= 0
+ = pprPrec 0 e0 <> char '+' <> pprPrec 0 e1
+ pprPrec p (UwMinus e0 e1) | p <= 0
+ = pprPrec 1 e0 <> char '-' <> pprPrec 1 e1
+ pprPrec p (UwTimes e0 e1) | p <= 1
+ = pprPrec 2 e0 <> char '*' <> pprPrec 2 e1
+ pprPrec _ other = parens (pprPrec 0 other)
+
+extractUnwind :: CmmBlock -> UnwindTable
+extractUnwind b = go $ blockToList mid
+ where (_, mid, _) = blockSplit b
+ go :: [CmmNode O O] -> UnwindTable
+ go [] = Map.empty
+ go (x : xs) = case x of
+ CmmUnwind g so -> Map.insert g (toUnwindExpr so) $! go xs
+ CmmTick {} -> go xs
+ _other -> Map.empty -- TODO: Unwind statements after actual instructions
+
+-- | Conversion of Cmm expressions to unwind expressions. We check for
+-- unsupported operator usages and simplify the expression as far as
+-- possible.
+toUnwindExpr :: CmmExpr -> UnwindExpr
+toUnwindExpr (CmmLit (CmmInt i _)) = UwConst (fromIntegral i)
+toUnwindExpr (CmmRegOff (CmmGlobal g) i) = UwReg g i
+toUnwindExpr (CmmReg (CmmGlobal g)) = UwReg g 0
+toUnwindExpr (CmmLoad e _) = UwDeref (toUnwindExpr e)
+toUnwindExpr e@(CmmMachOp op [e1, e2]) =
+ case (op, toUnwindExpr e1, toUnwindExpr e2) of
+ (MO_Add{}, UwReg r x, UwConst y) -> UwReg r (x + y)
+ (MO_Sub{}, UwReg r x, UwConst y) -> UwReg r (x - y)
+ (MO_Add{}, UwConst x, UwReg r y) -> UwReg r (x + y)
+ (MO_Add{}, UwConst x, UwConst y) -> UwConst (x + y)
+ (MO_Sub{}, UwConst x, UwConst y) -> UwConst (x - y)
+ (MO_Mul{}, UwConst x, UwConst y) -> UwConst (x * y)
+ (MO_Add{}, u1, u2 ) -> UwPlus u1 u2
+ (MO_Sub{}, u1, u2 ) -> UwMinus u1 u2
+ (MO_Mul{}, u1, u2 ) -> UwTimes u1 u2
+ _otherwise -> pprPanic "Unsupported operator in unwind expression!" (pprExpr e)
+toUnwindExpr e
+ = pprPanic "Unsupported unwind expression!" (ppr e)
+
+-- | Generates debug data into a buffer
+writeDebugToEventlog :: DynFlags -> ModLocation -> [DebugBlock] -> IO (Int, ForeignPtr Word8)
+writeDebugToEventlog dflags mod_loc blocks = do
+
+ -- Write data into a binary memory handle
+ bh <- openBinMem $ 1024 * 1024
+ let code = do putEvent EVENT_DEBUG_MODULE $ do
+ putString $ packageKeyString (thisPackage dflags)
+ putString $ fromMaybe "???" $ ml_hs_file mod_loc
+ foldM (putBlock maxBound) 0 blocks
+ void $ runPutDbg code bh dflags emptyUFM
+ getBinMemBuf bh
+
+-- | Packs the given static value into a (variable-length) event-log
+-- packet.
+putEvent :: Word8 -> PutDbgM () -> PutDbgM ()
+putEvent id cts
+ = PutDbgM $ \bh df cm ->
+ let wrap = do
+ put_ bh id
+ -- Put placeholder for size
+ sizePos <- put bh (0 :: Word16)
+ -- Put contents
+ res <- runPutDbg cts bh df cm
+ -- Put final size
+ endPos <- tellBin bh
+ putAt bh sizePos $ fromIntegral $ (endPos `diffBin` sizePos) - 2
+ -- Seek back
+ seekBin bh endPos
+ return res
+ in do catchSize bh 0x10000 wrap (return (cm, ()))
+
+-- | Puts an alternate version if the first one is bigger than the
+-- given limit.
+--
+-- This is a pretty crude way of handling oversized
+-- packets... Can't think of a better way right now though.
+catchSize :: BinHandle -> Int -> IO a -> IO a -> IO a
+catchSize bh limit cts1 cts2 = do
+
+ -- Put contents, note how much size it uses
+ start <- tellBin bh :: IO (Bin ())
+ a <- cts1
+ end <- tellBin bh
+
+ -- Seek back and put second version if size is over limit
+ if (end `diffBin` start) >= limit
+ then seekBin bh start >> cts2
+ else return a
+
+type BlockId = Word16
+
+putBlock :: BlockId -> BlockId -> DebugBlock -> PutDbgM BlockId
+putBlock pid bid block = do
+ -- Put sub-blocks
+ bid' <- foldM (putBlock bid) (bid+1) (dblBlocks block)
+ -- Write our own data
+ putEvent EVENT_DEBUG_BLOCK $ do
+ putDbg bid
+ putDbg pid
+ dflags <- getDynFlags
+ let showSDocC = flip (renderWithStyle dflags) (mkCodeStyle CStyle)
+ putString $ showSDocC $ ppr $ dblCLabel block
+ -- Write annotations.
+ mapM_ putAnnotEvent (dblTicks block)
+ return bid'
+
+putAnnotEvent :: RawTickish -> PutDbgM ()
+putAnnotEvent (SourceNote ss names) =
+ putEvent EVENT_DEBUG_SOURCE $ do
+ putDbg $ encLoc $ srcSpanStartLine ss
+ putDbg $ encLoc $ srcSpanStartCol ss
+ putDbg $ encLoc $ srcSpanEndLine ss
+ putDbg $ encLoc $ srcSpanEndCol ss
+ putString $ unpackFS $ srcSpanFile ss
+ putString names
+ where encLoc x = fromIntegral x :: Word16
+
+putAnnotEvent (CoreNote lbl corePtr)
+ -- This piece of core was already covered earlier in this block?
+ = do elem <- elemCoreMap (lbl, exprPtrCons corePtr)
+ when (not elem) $ putEvent EVENT_DEBUG_CORE $ do
+ dflags <- getDynFlags
+ putString $ showSDocDump dflags $ ppr lbl
+ -- Emit core, leaving out (= referencing) any core pieces
+ -- that were emitted from sub-blocks
+ case corePtr of
+ ExprPtr core -> putCoreExpr core >> addToCoreMap lbl DEFAULT
+ AltPtr alt -> putCoreAlt alt >> addToCoreMap lbl (fstOf3 alt)
+
+putAnnotEvent _ = return ()
+
+-- | Constants for core binary representation
+coreMisc, coreApp, coreRef, coreLam, coreLet, coreCase, coreAlt :: Word8
+coreMisc = 0
+coreApp = 1
+coreRef = 2
+coreLam = 3
+coreLet = 4
+coreCase = 5
+coreAlt = 6
+
+type CoreMap = UniqFM [AltCon]
+newtype PutDbgM a = PutDbgM { runPutDbg :: BinHandle -> DynFlags -> CoreMap -> IO (CoreMap, a) }
+
+instance Functor PutDbgM where
+ fmap f m = PutDbgM $ \bh df cm -> runPutDbg m bh df cm >>= \(cm', a) -> return (cm', f a)
+instance Monad PutDbgM where
+ return x = PutDbgM $ \_ _ cm -> return (cm, x)
+ m >>= f = PutDbgM $ \bh df cm -> runPutDbg m bh df cm >>= \(cm', a) -> runPutDbg (f a) bh df cm'
+instance Applicative PutDbgM where
+ pure = return
+ (<*>) = ap
+
+instance HasDynFlags PutDbgM where
+ getDynFlags = PutDbgM $ \_ df cm -> return (cm,df)
+
+putDbg :: Binary a => a -> PutDbgM ()
+putDbg x = PutDbgM $ \bf _ cm -> put_ bf x >> return (cm,())
+
+-- | Put a C-style string (null-terminated). We assume that the string
+-- is ASCII.
+--
+-- This could well be subject to change in future...
+putString :: String -> PutDbgM ()
+putString str = do
+ let putByte = putDbg :: Word8 -> PutDbgM ()
+ mapM_ (putByte . fromIntegral . ord) str
+ putByte 0
+
+putSDoc :: SDoc -> PutDbgM ()
+putSDoc str = do
+ dflags <- getDynFlags
+ putString $ showSDoc dflags str
+
+elemCoreMap :: (Var, AltCon) -> PutDbgM Bool
+elemCoreMap (bind, con) = PutDbgM $ \_ _ cm -> return $ (,) cm $
+ case lookupUFM cm bind of
+ Just cs -> con `elem` cs
+ Nothing -> False
+
+addToCoreMap :: Var -> AltCon -> PutDbgM ()
+addToCoreMap b a = PutDbgM $ \_ _ cm -> return (addToUFM_C (\o _ -> a:o) cm b [a], ())
+
+putCoreExpr :: CoreExpr -> PutDbgM ()
+putCoreExpr (App e1 e2) = do
+ putDbg coreApp
+ putCoreExpr e1
+ putCoreExpr e2
+putCoreExpr (Lam b e) = do
+ putDbg coreLam
+ putSDoc $ ppr b
+ putSDoc $ ppr $ varType b
+ putCoreExpr e
+putCoreExpr (Let es e) = do
+ putDbg coreLet
+ putCoreLet es
+ putCoreExpr e
+putCoreExpr (Case expr bind _ alts) = do
+ putDbg coreCase
+ putCoreExpr expr
+ putSDoc $ ppr bind
+ putSDoc $ ppr $ varType bind
+ putDbg (fromIntegral (length alts) :: Word16)
+ forM_ alts $ \alt@(a, _, _) ->
+ checkCoreRef (bind, a) $
+ putCoreAlt alt
+putCoreExpr (Cast e _) = putCoreExpr e
+putCoreExpr (Tick _ e) = putCoreExpr e
+-- All other elements are supposed to have a simple "pretty printed"
+-- representation that we can simply output verbatim.
+putCoreExpr other = do
+ putDbg coreMisc
+ putSDoc $ ppr other
+
+putCoreAlt :: CoreAlt -> PutDbgM ()
+putCoreAlt (a,binds,e) = do
+ putDbg coreAlt
+ putSDoc $ case a of
+ DEFAULT -> empty
+ _ -> ppr a
+ putDbg (fromIntegral (length binds) :: Word16)
+ forM_ binds $ \b -> do
+ putSDoc . ppr $ b
+ putSDoc . ppr . varType $ b
+ putCoreExpr e
+
+putCoreLet :: CoreBind -> PutDbgM ()
+putCoreLet (NonRec b e) = do
+ putDbg (1 :: Word16) -- could use 0 to mark non-recursive case?
+ putSDoc $ ppr b
+ putSDoc $ ppr $ varType b
+ checkCoreRef (b, DEFAULT) $ putCoreExpr e
+putCoreLet (Rec ps) = do
+ putDbg (fromIntegral $ length ps :: Word16)
+ forM_ ps $ \(b, e) -> do
+ putSDoc $ ppr b
+ putSDoc $ ppr $ varType b
+ checkCoreRef (b, DEFAULT) $ putCoreExpr e
+
+-- | Generate reference to core piece that was output elsewhere... Or
+-- proceed with given code otherwise.
+checkCoreRef :: (Var, AltCon) -> PutDbgM () -> PutDbgM ()
+checkCoreRef (b,a) code = do
+ elem <- elemCoreMap (b,a)
+ if elem
+ then do putDbg coreRef
+ dflags <- getDynFlags
+ putString $ showSDocDump dflags $ ppr b
+ putSDoc $ case a of
+ DEFAULT -> empty
+ _ -> ppr a
+ else do addToCoreMap b a
+ code
diff --git a/compiler/cmm/Hoopl.hs b/compiler/cmm/Hoopl.hs
index 08d95b5073ea..4b3717288f47 100644
--- a/compiler/cmm/Hoopl.hs
+++ b/compiler/cmm/Hoopl.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE RankNTypes, ScopedTypeVariables #-}
+
module Hoopl (
module Compiler.Hoopl,
module Hoopl.Dataflow,
@@ -36,7 +38,7 @@ deepFwdRw f = deepFwdRw3 f f f
-- But rw and rw' are single functions.
thenFwdRw :: forall n f.
FwdRewrite UniqSM n f
- -> FwdRewrite UniqSM n f
+ -> FwdRewrite UniqSM n f
-> FwdRewrite UniqSM n f
thenFwdRw rw3 rw3' = wrapFR2 thenrw rw3 rw3'
where
@@ -124,3 +126,30 @@ badd_rw :: BwdRewrite UniqSM n f
-> (Graph n e x, BwdRewrite UniqSM n f)
-> (Graph n e x, BwdRewrite UniqSM n f)
badd_rw rw2 (g, rw1) = (g, rw1 `thenBwdRw` rw2)
+
+-- Note [Deprecations in Hoopl]
+-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--
+-- CmmLive and CmmBuildInfoTables modules enable -fno-warn-warnings-deprecations
+-- flag because they import deprecated functions from Hoopl. I spent some time
+-- trying to figure out what is going on, so here's a brief explanation. The
+-- culprit is the joinOutFacts function, which should be replaced with
+-- joinFacts. The difference between them is that the latter one needs extra
+-- Label parameter. Labels identify blocks and are used in the fact base to
+-- assign facts to a block (in case you're wondering, Label is an Int wrapped in
+-- a newtype). Lattice join function is also required to accept a Label but the
+-- only reason why it is so are the debugging purposes: see joinInFacts function
+-- which is a no-op and is run only because join function might produce
+-- debugging output. Now, going back to the Cmm modules. The "problem" with the
+-- deprecated joinOutFacts function is that it passes the wrong label when calling
+-- lattice join function: instead of label of a block for which we are joining
+-- facts it uses labels of successors of that block. So the joinFacts function
+-- expects to be given a label of a block for which we are joining facts. I
+-- don't see an obvious way of recovering that Label at the call sites of
+-- joinOutFacts (if that was easily done then joinFacts function could do it
+-- internally without requiring label as a parameter). A cheap way of
+-- eliminating these warnings would be to create a bogus Label, since none of
+-- our join functions is actually using the Label parameter. But that doesn't
+-- feel right. I think the real solution here is to fix Hoopl API, which is
+-- already broken in several ways. See Hoopl/Cleanup page on the wiki for more
+-- notes on improving Hoopl.
diff --git a/compiler/cmm/Hoopl/Dataflow.hs b/compiler/cmm/Hoopl/Dataflow.hs
index 78b930a20f7e..f5511515a931 100644
--- a/compiler/cmm/Hoopl/Dataflow.hs
+++ b/compiler/cmm/Hoopl/Dataflow.hs
@@ -1,3 +1,12 @@
+{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE GADTs #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE RankNTypes #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE Trustworthy #-}
+{-# LANGUAGE TypeFamilies #-}
+{-# OPTIONS_GHC -fprof-auto-top #-}
+
--
-- Copyright (c) 2010, João Dias, Simon Marlow, Simon Peyton Jones,
-- and Norman Ramsey
@@ -9,10 +18,6 @@
-- specialised to the UniqSM monad.
--
-{-# LANGUAGE RankNTypes, ScopedTypeVariables, GADTs, EmptyDataDecls, PatternGuards, TypeFamilies, MultiParamTypeClasses #-}
-{-# OPTIONS_GHC -fprof-auto-top #-}
-{-# LANGUAGE Trustworthy #-}
-
module Hoopl.Dataflow
( DataflowLattice(..), OldFact(..), NewFact(..), Fact, mkFactBase
, ChangeFlag(..)
diff --git a/compiler/cmm/MkGraph.hs b/compiler/cmm/MkGraph.hs
index 6f9bbf8872ea..b2e7a7785227 100644
--- a/compiler/cmm/MkGraph.hs
+++ b/compiler/cmm/MkGraph.hs
@@ -1,7 +1,7 @@
-{-# LANGUAGE GADTs #-}
+{-# LANGUAGE BangPatterns, CPP, GADTs #-}
module MkGraph
- ( CmmAGraph, CgStmt(..)
+ ( CmmAGraph, CmmAGraphScoped, CgStmt(..)
, (<*>), catAGraphs
, mkLabel, mkMiddle, mkLast, outOfLine
, lgraphOfAGraph, labelAGraph
@@ -13,6 +13,7 @@ module MkGraph
, mkRawJump
, mkCbranch, mkSwitch
, mkReturn, mkComment, mkCallEntry, mkBranch
+ , mkUnwind
, copyInOflow, copyOutOflow
, noExtraStack
, toCall, Transfer(..)
@@ -61,19 +62,20 @@ import Prelude (($),Int,Eq(..)) -- avoid importing (<*>)
-- by providing a label for the entry point; see 'labelAGraph'.
--
type CmmAGraph = OrdList CgStmt
+type CmmAGraphScoped = (CmmAGraph, [CmmTickScope])
data CgStmt
- = CgLabel BlockId
+ = CgLabel BlockId [CmmTickScope]
| CgStmt (CmmNode O O)
| CgLast (CmmNode O C)
- | CgFork BlockId CmmAGraph
+ | CgFork BlockId CmmAGraphScoped
-flattenCmmAGraph :: BlockId -> CmmAGraph -> CmmGraph
-flattenCmmAGraph id stmts =
+flattenCmmAGraph :: BlockId -> CmmAGraphScoped -> CmmGraph
+flattenCmmAGraph id stmts_t =
CmmGraph { g_entry = id,
g_graph = GMany NothingO body NothingO }
where
- body = foldr addBlock emptyBody $ flatten id stmts []
+ body = foldr addBlock emptyBody $ flatten id stmts_t []
--
-- flatten: given an entry label and a CmmAGraph, make a list of blocks.
@@ -81,9 +83,9 @@ flattenCmmAGraph id stmts =
-- NB. avoid the quadratic-append trap by passing in the tail of the
-- list. This is important for Very Long Functions (e.g. in T783).
--
- flatten :: Label -> CmmAGraph -> [Block CmmNode C C] -> [Block CmmNode C C]
- flatten id g blocks
- = flatten1 (fromOL g) (blockJoinHead (CmmEntry id) emptyBlock) blocks
+ flatten :: Label -> CmmAGraphScoped -> [Block CmmNode C C] -> [Block CmmNode C C]
+ flatten id (g, tscope) blocks
+ = flatten1 (fromOL g) (blockJoinHead (CmmEntry id tscope) emptyBlock) blocks
--
-- flatten0: we are outside a block at this point: any code before
@@ -92,12 +94,12 @@ flattenCmmAGraph id stmts =
flatten0 :: [CgStmt] -> [Block CmmNode C C] -> [Block CmmNode C C]
flatten0 [] blocks = blocks
- flatten0 (CgLabel id : stmts) blocks
+ flatten0 (CgLabel id tscope : stmts) blocks
= flatten1 stmts block blocks
- where !block = blockJoinHead (CmmEntry id) emptyBlock
+ where !block = blockJoinHead (CmmEntry id tscope) emptyBlock
- flatten0 (CgFork fork_id stmts : rest) blocks
- = flatten fork_id stmts $ flatten0 rest blocks
+ flatten0 (CgFork fork_id stmts_t : rest) blocks
+ = flatten fork_id stmts_t $ flatten0 rest blocks
flatten0 (CgLast _ : stmts) blocks = flatten0 stmts blocks
flatten0 (CgStmt _ : stmts) blocks = flatten0 stmts blocks
@@ -127,14 +129,14 @@ flattenCmmAGraph id stmts =
= flatten1 stmts block' blocks
where !block' = blockSnoc block stmt
- flatten1 (CgFork fork_id stmts : rest) block blocks
- = flatten fork_id stmts $ flatten1 rest block blocks
+ flatten1 (CgFork fork_id stmts_t : rest) block blocks
+ = flatten fork_id stmts_t $ flatten1 rest block blocks
-- a label here means that we should start a new block, and the
-- current block should fall through to the new block.
- flatten1 (CgLabel id : stmts) block blocks
+ flatten1 (CgLabel id tscope : stmts) block blocks
= blockJoinTail block (CmmBranch id) :
- flatten1 stmts (blockJoinHead (CmmEntry id) emptyBlock) blocks
+ flatten1 stmts (blockJoinHead (CmmEntry id tscope) emptyBlock) blocks
@@ -147,8 +149,8 @@ catAGraphs :: [CmmAGraph] -> CmmAGraph
catAGraphs = concatOL
-- | created a sequence "goto id; id:" as an AGraph
-mkLabel :: BlockId -> CmmAGraph
-mkLabel bid = unitOL (CgLabel bid)
+mkLabel :: BlockId -> [CmmTickScope] -> CmmAGraph
+mkLabel bid scp = unitOL (CgLabel bid scp)
-- | creates an open AGraph from a given node
mkMiddle :: CmmNode O O -> CmmAGraph
@@ -159,16 +161,17 @@ mkLast :: CmmNode O C -> CmmAGraph
mkLast last = unitOL (CgLast last)
-- | A labelled code block; should end in a last node
-outOfLine :: BlockId -> CmmAGraph -> CmmAGraph
-outOfLine l g = unitOL (CgFork l g)
+outOfLine :: BlockId -> CmmAGraphScoped -> CmmAGraph
+outOfLine l c = unitOL (CgFork l c)
-- | allocate a fresh label for the entry point
-lgraphOfAGraph :: CmmAGraph -> UniqSM CmmGraph
-lgraphOfAGraph g = do u <- getUniqueM
- return (labelAGraph (mkBlockId u) g)
+lgraphOfAGraph :: CmmAGraphScoped -> UniqSM CmmGraph
+lgraphOfAGraph g = do
+ u <- getUniqueM
+ return (labelAGraph (mkBlockId u) g)
-- | use the given BlockId as the label of the entry point
-labelAGraph :: BlockId -> CmmAGraph -> CmmGraph
+labelAGraph :: BlockId -> CmmAGraphScoped -> CmmGraph
labelAGraph lbl ag = flattenCmmAGraph lbl ag
---------- No-ops
@@ -263,6 +266,8 @@ mkJumpReturnsTo dflags f callConv actuals ret_lbl ret_off updfr_off = do
mkUnsafeCall :: ForeignTarget -> [CmmFormal] -> [CmmActual] -> CmmAGraph
mkUnsafeCall t fs as = mkMiddle $ CmmUnsafeForeignCall t fs as
+mkUnwind :: GlobalReg -> CmmExpr -> CmmAGraph
+mkUnwind g e = mkMiddle $ CmmUnwind g e
--------------------------------------------------------------------------
diff --git a/compiler/cmm/PprC.hs b/compiler/cmm/PprC.hs
index 23989811ddf8..90252853177f 100644
--- a/compiler/cmm/PprC.hs
+++ b/compiler/cmm/PprC.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, GADTs #-}
+
-----------------------------------------------------------------------------
--
-- Pretty-printing of Cmm as C, suitable for feeding gcc
@@ -16,7 +18,6 @@
--
-----------------------------------------------------------------------------
-{-# LANGUAGE GADTs #-}
module PprC (
writeCs,
pprStringInCStyle
@@ -137,10 +138,10 @@ pprTop (CmmData _section (Statics lbl lits)) =
pprBBlock :: CmmBlock -> SDoc
pprBBlock block =
- nest 4 (pprBlockId lbl <> colon) $$
+ nest 4 (pprBlockId (entryLabel block) <> colon) $$
nest 8 (vcat (map pprStmt (blockToList nodes)) $$ pprStmt last)
where
- (CmmEntry lbl, nodes, last) = blockSplit block
+ (_, nodes, last) = blockSplit block
-- --------------------------------------------------------------------------
-- Info tables. Just arrays of words.
@@ -170,13 +171,15 @@ pprStmt :: CmmNode e x -> SDoc
pprStmt stmt =
sdocWithDynFlags $ \dflags ->
case stmt of
- CmmEntry _ -> empty
+ CmmEntry{} -> empty
CmmComment _ -> empty -- (hang (ptext (sLit "/*")) 3 (ftext s)) $$ ptext (sLit "*/")
-- XXX if the string contains "*/", we need to fix it
-- XXX we probably want to emit these comments when
-- some debugging option is on. They can get quite
-- large.
+ CmmTick _ -> empty
+
CmmAssign dest src -> pprAssign dflags dest src
CmmStore dest src
@@ -752,6 +755,12 @@ pprCallishMachOp_for_C mop
MO_Memmove -> ptext (sLit "memmove")
(MO_BSwap w) -> ptext (sLit $ bSwapLabel w)
(MO_PopCnt w) -> ptext (sLit $ popCntLabel w)
+ (MO_Clz w) -> ptext (sLit $ clzLabel w)
+ (MO_Ctz w) -> ptext (sLit $ ctzLabel w)
+ (MO_AtomicRMW w amop) -> ptext (sLit $ atomicRMWLabel w amop)
+ (MO_Cmpxchg w) -> ptext (sLit $ cmpxchgLabel w)
+ (MO_AtomicRead w) -> ptext (sLit $ atomicReadLabel w)
+ (MO_AtomicWrite w) -> ptext (sLit $ atomicWriteLabel w)
(MO_UF_Conv w) -> ptext (sLit $ word2FloatLabel w)
MO_S_QuotRem {} -> unsupported
diff --git a/compiler/cmm/PprCmm.hs b/compiler/cmm/PprCmm.hs
index 46257b4188b7..78d5c8a28364 100644
--- a/compiler/cmm/PprCmm.hs
+++ b/compiler/cmm/PprCmm.hs
@@ -1,3 +1,6 @@
+{-# LANGUAGE GADTs, TypeFamilies, FlexibleContexts, FlexibleInstances #-}
+{-# OPTIONS_GHC -fno-warn-orphans #-}
+
----------------------------------------------------------------------------
--
-- Pretty-printing of Cmm as (a superset of) C--
@@ -30,8 +33,6 @@
--
-- A useful example pass over Cmm is in nativeGen/MachCodeGen.hs
-{-# OPTIONS_GHC -fno-warn-orphans #-}
-{-# LANGUAGE GADTs, TypeFamilies, FlexibleContexts #-}
module PprCmm
( module PprCmmDecl
, module PprCmmExpr
@@ -42,11 +43,13 @@ import BlockId ()
import CLabel
import Cmm
import CmmUtils
+import DynFlags
import FastString
import Outputable
import PprCmmDecl
import PprCmmExpr
import Util
+import PprCore ()
import BasicTypes
import Compiler.Hoopl
@@ -137,6 +140,9 @@ pprCmmGraph g
$$ nest 2 (vcat $ map ppr blocks)
$$ text "}"
where blocks = postorderDfs g
+ -- postorderDfs has the side-effect of discarding unreachable code,
+ -- so pretty-printed Cmm will omit any unreachable blocks. This can
+ -- sometimes be confusing.
---------------------------------------------
-- Outputting CmmNode and types which it contains
@@ -175,13 +181,23 @@ pprNode :: CmmNode e x -> SDoc
pprNode node = pp_node <+> pp_debug
where
pp_node :: SDoc
- pp_node = case node of
+ pp_node = sdocWithDynFlags $ \dflags -> case node of
-- label:
- CmmEntry id -> ppr id <> colon
+ CmmEntry id tscope -> ppr id <> colon <+>
+ (sdocWithDynFlags $ \dflags ->
+ ppWhen (gopt Opt_PprShowTicks dflags) (text "//" <+> ppr tscope))
-- // text
CmmComment s -> text "//" <+> ftext s
+ -- //tick bla<...>
+ CmmTick t -> if gopt Opt_PprShowTicks dflags
+ then ptext (sLit "//tick") <+> ppr t
+ else empty
+
+ -- unwind reg = expr;
+ CmmUnwind r e -> ptext (sLit "unwind ") <> ppr r <+> char '=' <+> ppr e
+
-- reg = expr;
CmmAssign reg expr -> ppr reg <+> equals <+> ppr expr <> semi
@@ -264,6 +280,8 @@ pprNode node = pp_node <+> pp_debug
else case node of
CmmEntry {} -> empty -- Looks terrible with text " // CmmEntry"
CmmComment {} -> empty -- Looks also terrible with text " // CmmComment"
+ CmmTick {} -> empty -- Looks equally bad with text " // CmmTick"
+ CmmUnwind {} -> text " // CmmUnwind"
CmmAssign {} -> text " // CmmAssign"
CmmStore {} -> text " // CmmStore"
CmmUnsafeForeignCall {} -> text " // CmmUnsafeForeignCall"
diff --git a/compiler/cmm/PprCmmDecl.hs b/compiler/cmm/PprCmmDecl.hs
index 354a3d456330..dd80f5cd56b6 100644
--- a/compiler/cmm/PprCmmDecl.hs
+++ b/compiler/cmm/PprCmmDecl.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
----------------------------------------------------------------------------
--
-- Pretty-printing of common Cmm types
diff --git a/compiler/cmm/SMRep.lhs b/compiler/cmm/SMRep.lhs
index 0185ababe50f..0713620c5a03 100644
--- a/compiler/cmm/SMRep.lhs
+++ b/compiler/cmm/SMRep.lhs
@@ -6,15 +6,17 @@
Storage manager representation of closures
\begin{code}
-{-# LANGUAGE GeneralizedNewtypeDeriving #-}
+{-# LANGUAGE CPP,GeneralizedNewtypeDeriving #-}
module SMRep (
-- * Words and bytes
+ WordOff, ByteOff,
+ wordsToBytes, bytesToWordsRoundUp,
+ roundUpToWords,
+
StgWord, fromStgWord, toStgWord,
StgHalfWord, fromStgHalfWord, toStgHalfWord,
hALF_WORD_SIZE, hALF_WORD_SIZE_IN_BITS,
- WordOff, ByteOff,
- roundUpToWords,
-- * Closure repesentation
SMRep(..), -- CmmInfo sees the rep; no one else does
@@ -23,21 +25,27 @@ module SMRep (
ConstrDescription,
-- ** Construction
- mkHeapRep, blackHoleRep, indStaticRep, mkStackRep, mkRTSRep,
+ mkHeapRep, blackHoleRep, indStaticRep, mkStackRep, mkRTSRep, arrPtrsRep,
+ smallArrPtrsRep, arrWordsRep,
-- ** Predicates
isStaticRep, isConRep, isThunkRep, isFunRep, isStaticNoCafCon,
isStackRep,
-- ** Size-related things
- heapClosureSize,
- fixedHdrSize, arrWordsHdrSize, arrPtrsHdrSize,
- profHdrSize, thunkHdrSize, nonHdrSize,
+ heapClosureSizeW,
+ fixedHdrSizeW, arrWordsHdrSize, arrWordsHdrSizeW, arrPtrsHdrSize,
+ arrPtrsHdrSizeW, profHdrSize, thunkHdrSize, nonHdrSize, nonHdrSizeW,
+ smallArrPtrsHdrSize, smallArrPtrsHdrSizeW, hdrSize, hdrSizeW,
+ fixedHdrSize,
-- ** RTS closure types
rtsClosureType, rET_SMALL, rET_BIG,
aRG_GEN, aRG_GEN_BIG,
+ -- ** Arrays
+ card, cardRoundUp, cardTableSizeB, cardTableSizeW,
+
-- * Operations over [Word8] strings that don't belong here
pprWord8String, stringToWord8s
) where
@@ -63,11 +71,33 @@ import Data.Bits
%************************************************************************
\begin{code}
-type WordOff = Int -- Word offset, or word count
-type ByteOff = Int -- Byte offset, or byte count
+-- | Word offset, or word count
+type WordOff = Int
+-- | Byte offset, or byte count
+type ByteOff = Int
+
+-- | Round up the given byte count to the next byte count that's a
+-- multiple of the machine's word size.
roundUpToWords :: DynFlags -> ByteOff -> ByteOff
-roundUpToWords dflags n = (n + (wORD_SIZE dflags - 1)) .&. (complement (wORD_SIZE dflags - 1))
+roundUpToWords dflags n =
+ (n + (wORD_SIZE dflags - 1)) .&. (complement (wORD_SIZE dflags - 1))
+
+-- | Convert the given number of words to a number of bytes.
+--
+-- This function morally has type @WordOff -> ByteOff@, but uses @Num
+-- a@ to allow for overloading.
+wordsToBytes :: Num a => DynFlags -> a -> a
+wordsToBytes dflags n = fromIntegral (wORD_SIZE dflags) * n
+{-# SPECIALIZE wordsToBytes :: DynFlags -> Int -> Int #-}
+{-# SPECIALIZE wordsToBytes :: DynFlags -> Word -> Word #-}
+{-# SPECIALIZE wordsToBytes :: DynFlags -> Integer -> Integer #-}
+
+-- | First round the given byte count up to a multiple of the
+-- machine's word size and then convert the result to words.
+bytesToWordsRoundUp :: DynFlags -> ByteOff -> WordOff
+bytesToWordsRoundUp dflags n = (n + word_size - 1) `quot` word_size
+ where word_size = wORD_SIZE dflags
\end{code}
StgWord is a type representing an StgWord on the target platform.
@@ -140,6 +170,16 @@ data SMRep
!WordOff -- # non-ptr words INCLUDING SLOP (see mkHeapRep below)
ClosureTypeInfo -- type-specific info
+ | ArrayPtrsRep
+ !WordOff -- # ptr words
+ !WordOff -- # card table words
+
+ | SmallArrayPtrsRep
+ !WordOff -- # ptr words
+
+ | ArrayWordsRep
+ !WordOff -- # bytes expressed in words, rounded up
+
| StackRep -- Stack frame (RET_SMALL or RET_BIG)
Liveness
@@ -221,13 +261,22 @@ blackHoleRep = HeapRep False 0 0 BlackHole
indStaticRep :: SMRep
indStaticRep = HeapRep True 1 0 IndStatic
+arrPtrsRep :: DynFlags -> WordOff -> SMRep
+arrPtrsRep dflags elems = ArrayPtrsRep elems (cardTableSizeW dflags elems)
+
+smallArrPtrsRep :: WordOff -> SMRep
+smallArrPtrsRep elems = SmallArrayPtrsRep elems
+
+arrWordsRep :: DynFlags -> ByteOff -> SMRep
+arrWordsRep dflags bytes = ArrayWordsRep (bytesToWordsRoundUp dflags bytes)
+
-----------------------------------------------------------------------------
-- Predicates
isStaticRep :: SMRep -> IsStatic
isStaticRep (HeapRep is_static _ _ _) = is_static
-isStaticRep (StackRep {}) = False
isStaticRep (RTSRep _ rep) = isStaticRep rep
+isStaticRep _ = False
isStackRep :: SMRep -> Bool
isStackRep StackRep{} = True
@@ -259,9 +308,12 @@ isStaticNoCafCon _ = False
-----------------------------------------------------------------------------
-- Size-related things
+fixedHdrSize :: DynFlags -> ByteOff
+fixedHdrSize dflags = wordsToBytes dflags (fixedHdrSizeW dflags)
+
-- | Size of a closure header (StgHeader in includes/rts/storage/Closures.h)
-fixedHdrSize :: DynFlags -> WordOff
-fixedHdrSize dflags = sTD_HDR_SIZE dflags + profHdrSize dflags
+fixedHdrSizeW :: DynFlags -> WordOff
+fixedHdrSizeW dflags = sTD_HDR_SIZE dflags + profHdrSize dflags
-- | Size of the profiling part of a closure header
-- (StgProfHeader in includes/rts/storage/Closures.h)
@@ -273,32 +325,73 @@ profHdrSize dflags
-- | The garbage collector requires that every closure is at least as
-- big as this.
minClosureSize :: DynFlags -> WordOff
-minClosureSize dflags = fixedHdrSize dflags + mIN_PAYLOAD_SIZE dflags
+minClosureSize dflags = fixedHdrSizeW dflags + mIN_PAYLOAD_SIZE dflags
arrWordsHdrSize :: DynFlags -> ByteOff
arrWordsHdrSize dflags
- = fixedHdrSize dflags * wORD_SIZE dflags + sIZEOF_StgArrWords_NoHdr dflags
+ = fixedHdrSize dflags + sIZEOF_StgArrWords_NoHdr dflags
+
+arrWordsHdrSizeW :: DynFlags -> WordOff
+arrWordsHdrSizeW dflags =
+ fixedHdrSizeW dflags +
+ (sIZEOF_StgArrWords_NoHdr dflags `quot` wORD_SIZE dflags)
arrPtrsHdrSize :: DynFlags -> ByteOff
arrPtrsHdrSize dflags
- = fixedHdrSize dflags * wORD_SIZE dflags + sIZEOF_StgMutArrPtrs_NoHdr dflags
+ = fixedHdrSize dflags + sIZEOF_StgMutArrPtrs_NoHdr dflags
+
+arrPtrsHdrSizeW :: DynFlags -> WordOff
+arrPtrsHdrSizeW dflags =
+ fixedHdrSizeW dflags +
+ (sIZEOF_StgMutArrPtrs_NoHdr dflags `quot` wORD_SIZE dflags)
+
+smallArrPtrsHdrSize :: DynFlags -> ByteOff
+smallArrPtrsHdrSize dflags
+ = fixedHdrSize dflags + sIZEOF_StgSmallMutArrPtrs_NoHdr dflags
+
+smallArrPtrsHdrSizeW :: DynFlags -> WordOff
+smallArrPtrsHdrSizeW dflags =
+ fixedHdrSizeW dflags +
+ (sIZEOF_StgSmallMutArrPtrs_NoHdr dflags `quot` wORD_SIZE dflags)
-- Thunks have an extra header word on SMP, so the update doesn't
-- splat the payload.
thunkHdrSize :: DynFlags -> WordOff
-thunkHdrSize dflags = fixedHdrSize dflags + smp_hdr
+thunkHdrSize dflags = fixedHdrSizeW dflags + smp_hdr
where smp_hdr = sIZEOF_StgSMPThunkHeader dflags `quot` wORD_SIZE dflags
-
-nonHdrSize :: SMRep -> WordOff
-nonHdrSize (HeapRep _ p np _) = p + np
-nonHdrSize (StackRep bs) = length bs
-nonHdrSize (RTSRep _ rep) = nonHdrSize rep
-
-heapClosureSize :: DynFlags -> SMRep -> WordOff
-heapClosureSize dflags (HeapRep _ p np ty)
+hdrSize :: DynFlags -> SMRep -> ByteOff
+hdrSize dflags rep = wordsToBytes dflags (hdrSizeW dflags rep)
+
+hdrSizeW :: DynFlags -> SMRep -> WordOff
+hdrSizeW dflags (HeapRep _ _ _ ty) = closureTypeHdrSize dflags ty
+hdrSizeW dflags (ArrayPtrsRep _ _) = arrPtrsHdrSizeW dflags
+hdrSizeW dflags (SmallArrayPtrsRep _) = smallArrPtrsHdrSizeW dflags
+hdrSizeW dflags (ArrayWordsRep _) = arrWordsHdrSizeW dflags
+hdrSizeW _ _ = panic "SMRep.hdrSizeW"
+
+nonHdrSize :: DynFlags -> SMRep -> ByteOff
+nonHdrSize dflags rep = wordsToBytes dflags (nonHdrSizeW rep)
+
+nonHdrSizeW :: SMRep -> WordOff
+nonHdrSizeW (HeapRep _ p np _) = p + np
+nonHdrSizeW (ArrayPtrsRep elems ct) = elems + ct
+nonHdrSizeW (SmallArrayPtrsRep elems) = elems
+nonHdrSizeW (ArrayWordsRep words) = words
+nonHdrSizeW (StackRep bs) = length bs
+nonHdrSizeW (RTSRep _ rep) = nonHdrSizeW rep
+
+-- | The total size of the closure, in words.
+heapClosureSizeW :: DynFlags -> SMRep -> WordOff
+heapClosureSizeW dflags (HeapRep _ p np ty)
= closureTypeHdrSize dflags ty + p + np
-heapClosureSize _ _ = panic "SMRep.heapClosureSize"
+heapClosureSizeW dflags (ArrayPtrsRep elems ct)
+ = arrPtrsHdrSizeW dflags + elems + ct
+heapClosureSizeW dflags (SmallArrayPtrsRep elems)
+ = smallArrPtrsHdrSizeW dflags + elems
+heapClosureSizeW dflags (ArrayWordsRep words)
+ = arrWordsHdrSizeW dflags + words
+heapClosureSizeW _ _ = panic "SMRep.heapClosureSize"
closureTypeHdrSize :: DynFlags -> ClosureTypeInfo -> WordOff
closureTypeHdrSize dflags ty = case ty of
@@ -306,13 +399,34 @@ closureTypeHdrSize dflags ty = case ty of
ThunkSelector{} -> thunkHdrSize dflags
BlackHole{} -> thunkHdrSize dflags
IndStatic{} -> thunkHdrSize dflags
- _ -> fixedHdrSize dflags
+ _ -> fixedHdrSizeW dflags
-- All thunks use thunkHdrSize, even if they are non-updatable.
-- this is because we don't have separate closure types for
-- updatable vs. non-updatable thunks, so the GC can't tell the
-- difference. If we ever have significant numbers of non-
-- updatable thunks, it might be worth fixing this.
+-- ---------------------------------------------------------------------------
+-- Arrays
+
+-- | The byte offset into the card table of the card for a given element
+card :: DynFlags -> Int -> Int
+card dflags i = i `shiftR` mUT_ARR_PTRS_CARD_BITS dflags
+
+-- | Convert a number of elements to a number of cards, rounding up
+cardRoundUp :: DynFlags -> Int -> Int
+cardRoundUp dflags i =
+ card dflags (i + ((1 `shiftL` mUT_ARR_PTRS_CARD_BITS dflags) - 1))
+
+-- | The size of a card table, in bytes
+cardTableSizeB :: DynFlags -> Int -> ByteOff
+cardTableSizeB dflags elems = cardRoundUp dflags elems
+
+-- | The size of a card table, in words
+cardTableSizeW :: DynFlags -> Int -> WordOff
+cardTableSizeW dflags elems =
+ bytesToWordsRoundUp dflags (cardTableSizeB dflags elems)
+
-----------------------------------------------------------------------------
-- deriving the RTS closure type from an SMRep
@@ -403,6 +517,12 @@ instance Outputable SMRep where
pp_n _ 0 = empty
pp_n s n = int n <+> text s
+ ppr (ArrayPtrsRep size _) = ptext (sLit "ArrayPtrsRep") <+> ppr size
+
+ ppr (SmallArrayPtrsRep size) = ptext (sLit "SmallArrayPtrsRep") <+> ppr size
+
+ ppr (ArrayWordsRep words) = ptext (sLit "ArrayWordsRep") <+> ppr words
+
ppr (StackRep bs) = ptext (sLit "StackRep") <+> ppr bs
ppr (RTSRep ty rep) = ptext (sLit "tag:") <> ppr ty <+> ppr rep
diff --git a/compiler/codeGen/CgUtils.hs b/compiler/codeGen/CgUtils.hs
index 6b36ab09cd22..51b8ed9ec8c3 100644
--- a/compiler/codeGen/CgUtils.hs
+++ b/compiler/codeGen/CgUtils.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, GADTs #-}
+
-----------------------------------------------------------------------------
--
-- Code generator utilities; mostly monadic
@@ -6,7 +8,6 @@
--
-----------------------------------------------------------------------------
-{-# LANGUAGE GADTs #-}
module CgUtils ( fixStgRegisters ) where
#include "HsVersions.h"
diff --git a/compiler/codeGen/CodeGen/Platform/ARM.hs b/compiler/codeGen/CodeGen/Platform/ARM.hs
index 727a43561fd8..5d1148496c2a 100644
--- a/compiler/codeGen/CodeGen/Platform/ARM.hs
+++ b/compiler/codeGen/CodeGen/Platform/ARM.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module CodeGen.Platform.ARM where
diff --git a/compiler/codeGen/CodeGen/Platform/NoRegs.hs b/compiler/codeGen/CodeGen/Platform/NoRegs.hs
index c4c63b7572eb..0c85ffbda77a 100644
--- a/compiler/codeGen/CodeGen/Platform/NoRegs.hs
+++ b/compiler/codeGen/CodeGen/Platform/NoRegs.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module CodeGen.Platform.NoRegs where
diff --git a/compiler/codeGen/CodeGen/Platform/PPC.hs b/compiler/codeGen/CodeGen/Platform/PPC.hs
index bcbdfe244b09..76a2b020ac37 100644
--- a/compiler/codeGen/CodeGen/Platform/PPC.hs
+++ b/compiler/codeGen/CodeGen/Platform/PPC.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module CodeGen.Platform.PPC where
diff --git a/compiler/codeGen/CodeGen/Platform/PPC_Darwin.hs b/compiler/codeGen/CodeGen/Platform/PPC_Darwin.hs
index 42bf22f26c97..a98e558cc10e 100644
--- a/compiler/codeGen/CodeGen/Platform/PPC_Darwin.hs
+++ b/compiler/codeGen/CodeGen/Platform/PPC_Darwin.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module CodeGen.Platform.PPC_Darwin where
diff --git a/compiler/codeGen/CodeGen/Platform/SPARC.hs b/compiler/codeGen/CodeGen/Platform/SPARC.hs
index b49af144096b..991f515eaf4d 100644
--- a/compiler/codeGen/CodeGen/Platform/SPARC.hs
+++ b/compiler/codeGen/CodeGen/Platform/SPARC.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module CodeGen.Platform.SPARC where
diff --git a/compiler/codeGen/CodeGen/Platform/X86.hs b/compiler/codeGen/CodeGen/Platform/X86.hs
index 6dd74df130a5..e74807ff88d5 100644
--- a/compiler/codeGen/CodeGen/Platform/X86.hs
+++ b/compiler/codeGen/CodeGen/Platform/X86.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module CodeGen.Platform.X86 where
diff --git a/compiler/codeGen/CodeGen/Platform/X86_64.hs b/compiler/codeGen/CodeGen/Platform/X86_64.hs
index 190d642ea6ca..102132d6797b 100644
--- a/compiler/codeGen/CodeGen/Platform/X86_64.hs
+++ b/compiler/codeGen/CodeGen/Platform/X86_64.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module CodeGen.Platform.X86_64 where
diff --git a/compiler/codeGen/StgCmm.hs b/compiler/codeGen/StgCmm.hs
index a92f80439bfa..efc89fe04a82 100644
--- a/compiler/codeGen/StgCmm.hs
+++ b/compiler/codeGen/StgCmm.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Stg to C-- code generation
@@ -37,7 +39,6 @@ import DataCon
import Name
import TyCon
import Module
-import ErrUtils
import Outputable
import Stream
import BasicTypes
@@ -60,9 +61,7 @@ codeGen :: DynFlags
codeGen dflags this_mod data_tycons
cost_centre_info stg_binds hpc_info
- = do { liftIO $ showPass dflags "New CodeGen"
-
- -- cg: run the code generator, and yield the resulting CmmGroup
+ = do { -- cg: run the code generator, and yield the resulting CmmGroup
-- Using an IORef to store the state is a bit crude, but otherwise
-- we would need to add a state monad layer.
; cgref <- liftIO $ newIORef =<< initC
diff --git a/compiler/codeGen/StgCmmBind.hs b/compiler/codeGen/StgCmmBind.hs
index 23367926c763..5fcea81fdd9c 100644
--- a/compiler/codeGen/StgCmmBind.hs
+++ b/compiler/codeGen/StgCmmBind.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Stg to C-- code generation: bindings
@@ -29,7 +31,7 @@ import StgCmmClosure
import StgCmmForeign (emitPrimCall)
import MkGraph
-import CoreSyn ( AltCon(..) )
+import CoreSyn ( AltCon(..), tickishIsCode )
import SMRep
import Cmm
import CmmInfo
@@ -46,7 +48,6 @@ import Util
import BasicTypes
import Outputable
import FastString
-import Maybes
import DynFlags
import Control.Monad
@@ -262,14 +263,22 @@ mkRhsClosure dflags bndr _cc _bi
[NonVoid the_fv] -- Just one free var
upd_flag -- Updatable thunk
[] -- A thunk
- (StgCase (StgApp scrutinee [{-no args-}])
- _ _ _ _ -- ignore uniq, etc.
- (AlgAlt _)
- [(DataAlt _, params, _use_mask,
- (StgApp selectee [{-no args-}]))])
- | the_fv == scrutinee -- Scrutinee is the only free variable
- && maybeToBool maybe_offset -- Selectee is a component of the tuple
- && offset_into_int <= mAX_SPEC_SELECTEE_SIZE dflags -- Offset is small enough
+ expr
+ | let strip = snd . stripStgTicksTop (not . tickishIsCode) -- ignore non-code ticks
+ , StgCase (StgApp scrutinee [{-no args-}])
+ _ _ _ _ -- ignore uniq, etc.
+ (AlgAlt _)
+ [(DataAlt _, params, _use_mask, sel_expr)] <- strip expr
+ , StgApp selectee [{-no args-}] <- strip sel_expr
+ , the_fv == scrutinee -- Scrutinee is the only free variable
+
+ , let (_, _, params_w_offsets) = mkVirtConstrOffsets dflags (addIdReps params)
+ -- Just want the layout
+ , Just the_offset <- assocMaybe params_w_offsets (NonVoid selectee)
+
+ , let offset_into_int = bytesToWordsRoundUp dflags the_offset
+ - fixedHdrSizeW dflags
+ , offset_into_int <= mAX_SPEC_SELECTEE_SIZE dflags -- Offset is small enough
= -- NOT TRUE: ASSERT(is_single_constructor)
-- The simplifier may have statically determined that the single alternative
-- is the only possible case and eliminated the others, even if there are
@@ -278,15 +287,8 @@ mkRhsClosure dflags bndr _cc _bi
-- will evaluate to.
--
-- srt is discarded; it must be empty
- cgRhsStdThunk bndr lf_info [StgVarArg the_fv]
- where
- lf_info = mkSelectorLFInfo bndr offset_into_int
- (isUpdatable upd_flag)
- (_, _, params_w_offsets) = mkVirtConstrOffsets dflags (addIdReps params)
- -- Just want the layout
- maybe_offset = assocMaybe params_w_offsets (NonVoid selectee)
- Just the_offset = maybe_offset
- offset_into_int = the_offset - fixedHdrSize dflags
+ let lf_info = mkSelectorLFInfo bndr offset_into_int (isUpdatable upd_flag)
+ in cgRhsStdThunk bndr lf_info [StgVarArg the_fv]
---------- Note [Ap thunks] ------------------
mkRhsClosure dflags bndr _cc _bi
@@ -341,7 +343,7 @@ mkRhsClosure dflags bndr cc _ fvs upd_flag args body
; dflags <- getDynFlags
; let name = idName bndr
descr = closureDescription dflags mod_name name
- fv_details :: [(NonVoid Id, VirtualHpOffset)]
+ fv_details :: [(NonVoid Id, ByteOff)]
(tot_wds, ptr_wds, fv_details)
= mkVirtHeapOffsets dflags (isLFThunk lf_info)
(addIdReps (map unsafe_stripNV reduced_fvs))
@@ -434,7 +436,7 @@ closureCodeBody :: Bool -- whether this is a top-level binding
-> [NonVoid Id] -- incoming args to the closure
-> Int -- arity, including void args
-> StgExpr
- -> [(NonVoid Id, VirtualHpOffset)] -- the closure's free vars
+ -> [(NonVoid Id, ByteOff)] -- the closure's free vars
-> FCode ()
{- There are two main cases for the code for closures.
@@ -472,25 +474,21 @@ closureCodeBody top_lvl bndr cl_info cc args arity body fv_details
\(_offset, node, arg_regs) -> do
-- Emit slow-entry code (for entering a closure through a PAP)
{ mkSlowEntryCode bndr cl_info arg_regs
-
; dflags <- getDynFlags
; let node_points = nodeMustPointToIt dflags lf_info
node' = if node_points then Just node else Nothing
- -- Emit new label that might potentially be a header
- -- of a self-recursive tail call. See Note
- -- [Self-recursive tail calls] in StgCmmExpr
; loop_header_id <- newLabelC
- ; emitLabel loop_header_id
- ; when node_points (ldvEnterClosure cl_info (CmmLocal node))
-- Extend reader monad with information that
-- self-recursive tail calls can be optimized into local
- -- jumps
+ -- jumps. See Note [Self-recursive tail calls] in StgCmmExpr.
; withSelfLoop (bndr, loop_header_id, arg_regs) $ do
{
-- Main payload
; entryHeapCheck cl_info node' arity arg_regs $ do
- { -- ticky after heap check to avoid double counting
- tickyEnterFun cl_info
+ { -- emit LDV code when profiling
+ when node_points (ldvEnterClosure cl_info (CmmLocal node))
+ -- ticky after heap check to avoid double counting
+ ; tickyEnterFun cl_info
; enterCostCentreFun cc
(CmmMachOp (mo_wordSub dflags)
[ CmmReg (CmmLocal node) -- See [NodeReg clobbered with loopification]
@@ -518,10 +516,10 @@ closureCodeBody top_lvl bndr cl_info cc args arity body fv_details
-- A function closure pointer may be tagged, so we
-- must take it into account when accessing the free variables.
-bind_fv :: (NonVoid Id, VirtualHpOffset) -> FCode (LocalReg, WordOff)
+bind_fv :: (NonVoid Id, ByteOff) -> FCode (LocalReg, ByteOff)
bind_fv (id, off) = do { reg <- rebindToReg id; return (reg, off) }
-load_fvs :: LocalReg -> LambdaFormInfo -> [(LocalReg, WordOff)] -> FCode ()
+load_fvs :: LocalReg -> LambdaFormInfo -> [(LocalReg, ByteOff)] -> FCode ()
load_fvs node lf_info = mapM_ (\ (reg, off) ->
do dflags <- getDynFlags
let tag = lfDynTag dflags lf_info
@@ -551,11 +549,12 @@ mkSlowEntryCode bndr cl_info arg_regs -- function closure is already in `Node'
(mkLblExpr fast_lbl)
(map (CmmReg . CmmLocal) (node : arg_regs))
(initUpdFrameOff dflags)
- emitProcWithConvention Slow Nothing slow_lbl (node : arg_regs) jump
+ tscope <- getTickScope
+ emitProcWithConvention Slow Nothing slow_lbl (node : arg_regs) (jump, tscope)
| otherwise = return ()
-----------------------------------------
-thunkCode :: ClosureInfo -> [(NonVoid Id, VirtualHpOffset)] -> CostCentreStack
+thunkCode :: ClosureInfo -> [(NonVoid Id, ByteOff)] -> CostCentreStack
-> LocalReg -> Int -> StgExpr -> FCode ()
thunkCode cl_info fv_details _cc node arity body
= do { dflags <- getDynFlags
@@ -624,7 +623,7 @@ emitBlackHoleCode node = do
-- work with profiling.
when eager_blackholing $ do
- emitStore (cmmOffsetW dflags node (fixedHdrSize dflags))
+ emitStore (cmmOffsetW dflags node (fixedHdrSizeW dflags))
(CmmReg (CmmGlobal CurrentTSO))
emitPrimCall [] MO_WriteBarrier []
emitStore node (CmmReg (CmmGlobal EagerBlackholeInfo))
@@ -676,7 +675,7 @@ pushUpdateFrame lbl updatee body
updfr <- getUpdFrameOff
dflags <- getDynFlags
let
- hdr = fixedHdrSize dflags * wORD_SIZE dflags
+ hdr = fixedHdrSize dflags
frame = updfr + hdr + sIZEOF_StgUpdateFrame_NoHdr dflags
--
emitUpdateFrame dflags (CmmStackSlot Old frame) lbl updatee
@@ -685,7 +684,7 @@ pushUpdateFrame lbl updatee body
emitUpdateFrame :: DynFlags -> CmmExpr -> CLabel -> CmmExpr -> FCode ()
emitUpdateFrame dflags frame lbl updatee = do
let
- hdr = fixedHdrSize dflags * wORD_SIZE dflags
+ hdr = fixedHdrSize dflags
off_updatee = hdr + oFFSET_StgUpdateFrame_updatee dflags
--
emitStore frame (mkLblExpr lbl)
diff --git a/compiler/codeGen/StgCmmClosure.hs b/compiler/codeGen/StgCmmClosure.hs
index af9c7b8e07e2..b65d56bae2a9 100644
--- a/compiler/codeGen/StgCmmClosure.hs
+++ b/compiler/codeGen/StgCmmClosure.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, RecordWildCards #-}
+
-----------------------------------------------------------------------------
--
-- Stg to C-- code generation:
@@ -9,8 +11,6 @@
--
-----------------------------------------------------------------------------
-{-# LANGUAGE RecordWildCards #-}
-
module StgCmmClosure (
DynTag, tagForCon, isSmallFamily,
ConTagZ, dataConTagZ,
@@ -450,25 +450,27 @@ in TcGenDeriv.) -}
whether or not it has free variables, and whether we're running
sequentially or in parallel.
-Closure Node Argument Enter
-Characteristics Par Req'd Passing Via
--------------------------------------------------------------------------------
-Unknown & no & yes & stack & node
-Known fun (>1 arg), no fvs & no & no & registers & fast entry (enough args)
- & slow entry (otherwise)
-Known fun (>1 arg), fvs & no & yes & registers & fast entry (enough args)
-0 arg, no fvs \r,\s & no & no & n/a & direct entry
-0 arg, no fvs \u & no & yes & n/a & node
-0 arg, fvs \r,\s & no & yes & n/a & direct entry
-0 arg, fvs \u & no & yes & n/a & node
-Unknown & yes & yes & stack & node
-Known fun (>1 arg), no fvs & yes & no & registers & fast entry (enough args)
- & slow entry (otherwise)
-Known fun (>1 arg), fvs & yes & yes & registers & node
-0 arg, no fvs \r,\s & yes & no & n/a & direct entry
-0 arg, no fvs \u & yes & yes & n/a & node
-0 arg, fvs \r,\s & yes & yes & n/a & node
-0 arg, fvs \u & yes & yes & n/a & node
+Closure Node Argument Enter
+Characteristics Par Req'd Passing Via
+---------------------------------------------------------------------------
+Unknown & no & yes & stack & node
+Known fun (>1 arg), no fvs & no & no & registers & fast entry (enough args)
+ & slow entry (otherwise)
+Known fun (>1 arg), fvs & no & yes & registers & fast entry (enough args)
+0 arg, no fvs \r,\s & no & no & n/a & direct entry
+0 arg, no fvs \u & no & yes & n/a & node
+0 arg, fvs \r,\s,selector & no & yes & n/a & node
+0 arg, fvs \r,\s & no & yes & n/a & direct entry
+0 arg, fvs \u & no & yes & n/a & node
+Unknown & yes & yes & stack & node
+Known fun (>1 arg), no fvs & yes & no & registers & fast entry (enough args)
+ & slow entry (otherwise)
+Known fun (>1 arg), fvs & yes & yes & registers & node
+0 arg, fvs \r,\s,selector & yes & yes & n/a & node
+0 arg, no fvs \r,\s & yes & no & n/a & direct entry
+0 arg, no fvs \u & yes & yes & n/a & node
+0 arg, fvs \r,\s & yes & yes & n/a & node
+0 arg, fvs \u & yes & yes & n/a & node
When black-holing, single-entry closures could also be entered via node
(rather than directly) to catch double-entry. -}
@@ -519,7 +521,8 @@ getCallMethod dflags _name _ lf_info _n_args _cg_loc _self_loop_info
-- fetched since we allocated it.
EnterIt
-getCallMethod dflags name id (LFReEntrant _ arity _ _) n_args _cg_loc _self_loop_info
+getCallMethod dflags name id (LFReEntrant _ arity _ _) n_args _cg_loc
+ _self_loop_info
| n_args == 0 = ASSERT( arity /= 0 )
ReturnIt -- No args at all
| n_args < arity = SlowCall -- Not enough args
@@ -531,7 +534,8 @@ getCallMethod _ _name _ LFUnLifted n_args _cg_loc _self_loop_info
getCallMethod _ _name _ (LFCon _) n_args _cg_loc _self_loop_info
= ASSERT( n_args == 0 ) ReturnIt
-getCallMethod dflags name id (LFThunk _ _ updatable std_form_info is_fun) n_args _cg_loc _self_loop_info
+getCallMethod dflags name id (LFThunk _ _ updatable std_form_info is_fun)
+ n_args _cg_loc _self_loop_info
| is_fun -- it *might* be a function, so we must "call" it (which is always safe)
= SlowCall -- We cannot just enter it [in eval/apply, the entry code
-- is the fast-entry code]
@@ -544,6 +548,12 @@ getCallMethod dflags name id (LFThunk _ _ updatable std_form_info is_fun) n_args
of jumping directly to the entry code is still valid. --SDM
-}
= EnterIt
+
+ -- even a non-updatable selector thunk can be updated by the garbage
+ -- collector, so we must enter it. (#8817)
+ | SelectorThunk{} <- std_form_info
+ = EnterIt
+
-- We used to have ASSERT( n_args == 0 ), but actually it is
-- possible for the optimiser to generate
-- let bot :: Int = error Int "urk"
@@ -553,7 +563,8 @@ getCallMethod dflags name id (LFThunk _ _ updatable std_form_info is_fun) n_args
| otherwise -- Jump direct to code for single-entry thunks
= ASSERT( n_args == 0 )
- DirectEntry (thunkEntryLabel dflags name (idCafInfo id) std_form_info updatable) 0
+ DirectEntry (thunkEntryLabel dflags name (idCafInfo id) std_form_info
+ updatable) 0
getCallMethod _ _name _ (LFUnknown True) _n_arg _cg_locs _self_loop_info
= SlowCall -- might be a function
@@ -562,7 +573,8 @@ getCallMethod _ name _ (LFUnknown False) n_args _cg_loc _self_loop_info
= ASSERT2( n_args == 0, ppr name <+> ppr n_args )
EnterIt -- Not a function
-getCallMethod _ _name _ LFLetNoEscape _n_args (LneLoc blk_id lne_regs) _self_loop_info
+getCallMethod _ _name _ LFLetNoEscape _n_args (LneLoc blk_id lne_regs)
+ _self_loop_info
= JumpToIt blk_id lne_regs
getCallMethod _ _ _ _ _ _ _ = panic "Unknown call method"
diff --git a/compiler/codeGen/StgCmmCon.hs b/compiler/codeGen/StgCmmCon.hs
index eb00bbf0c0a2..edd064848fc5 100644
--- a/compiler/codeGen/StgCmmCon.hs
+++ b/compiler/codeGen/StgCmmCon.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Stg to C--: code generation for constructors
@@ -21,6 +23,7 @@ import CoreSyn ( AltCon(..) )
import StgCmmMonad
import StgCmmEnv
import StgCmmHeap
+import StgCmmLayout
import StgCmmUtils
import StgCmmClosure
import StgCmmProf ( curCCS )
@@ -187,9 +190,9 @@ buildDynCon' dflags platform binder _ _cc con [arg]
, StgLitArg (MachInt val) <- arg
, val <= fromIntegral (mAX_INTLIKE dflags) -- Comparisons at type Integer!
, val >= fromIntegral (mIN_INTLIKE dflags) -- ...ditto...
- = do { let intlike_lbl = mkCmmClosureLabel rtsPackageId (fsLit "stg_INTLIKE")
+ = do { let intlike_lbl = mkCmmClosureLabel rtsPackageKey (fsLit "stg_INTLIKE")
val_int = fromIntegral val :: Int
- offsetW = (val_int - mIN_INTLIKE dflags) * (fixedHdrSize dflags + 1)
+ offsetW = (val_int - mIN_INTLIKE dflags) * (fixedHdrSizeW dflags + 1)
-- INTLIKE closures consist of a header and one word payload
intlike_amode = cmmLabelOffW dflags intlike_lbl offsetW
; return ( litIdInfo dflags binder (mkConLFInfo con) intlike_amode
@@ -202,8 +205,8 @@ buildDynCon' dflags platform binder _ _cc con [arg]
, let val_int = ord val :: Int
, val_int <= mAX_CHARLIKE dflags
, val_int >= mIN_CHARLIKE dflags
- = do { let charlike_lbl = mkCmmClosureLabel rtsPackageId (fsLit "stg_CHARLIKE")
- offsetW = (val_int - mIN_CHARLIKE dflags) * (fixedHdrSize dflags + 1)
+ = do { let charlike_lbl = mkCmmClosureLabel rtsPackageKey (fsLit "stg_CHARLIKE")
+ offsetW = (val_int - mIN_CHARLIKE dflags) * (fixedHdrSizeW dflags + 1)
-- CHARLIKE closures consist of a header and one word payload
charlike_amode = cmmLabelOffW dflags charlike_lbl offsetW
; return ( litIdInfo dflags binder (mkConLFInfo con) charlike_amode
diff --git a/compiler/codeGen/StgCmmEnv.hs b/compiler/codeGen/StgCmmEnv.hs
index 2b8677c408ca..4127b6740182 100644
--- a/compiler/codeGen/StgCmmEnv.hs
+++ b/compiler/codeGen/StgCmmEnv.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Stg to C-- code generation: the binding environment
diff --git a/compiler/codeGen/StgCmmExpr.hs b/compiler/codeGen/StgCmmExpr.hs
index cc32a1445bf8..278b10c937f7 100644
--- a/compiler/codeGen/StgCmmExpr.hs
+++ b/compiler/codeGen/StgCmmExpr.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Stg to C-- code generation: expressions
@@ -45,6 +47,7 @@ import FastString
import Outputable
import Control.Monad (when,void)
+import Control.Arrow (first)
------------------------------------------------------------------------
-- cgExpr: the main function
@@ -60,10 +63,7 @@ cgExpr (StgOpApp (StgPrimOp SeqOp) [StgVarArg a, _] _res_ty) =
cgExpr (StgOpApp op args ty) = cgOpApp op args ty
cgExpr (StgConApp con args) = cgConApp con args
-cgExpr (StgSCC cc tick push expr) = do { emitSetCCC cc tick push; cgExpr expr }
-cgExpr (StgTick m n expr) = do dflags <- getDynFlags
- emit (mkTickBox dflags m n)
- cgExpr expr
+cgExpr (StgTick t e) = cgTick t >> cgExpr e
cgExpr (StgLit lit) = do cmm_lit <- cgLit lit
emitReturn [CmmLit cmm_lit]
@@ -127,8 +127,8 @@ cgLetNoEscapeRhs
cgLetNoEscapeRhs join_id local_cc bndr rhs =
do { (info, rhs_code) <- cgLetNoEscapeRhsBody local_cc bndr rhs
; let (bid, _) = expectJust "cgLetNoEscapeRhs" $ maybeLetNoEscape info
- ; let code = do { body <- getCode rhs_code
- ; emitOutOfLine bid (body <*> mkBranch join_id) }
+ ; let code = do { (_, body) <- getCodeScoped rhs_code
+ ; emitOutOfLine bid (first (<*> mkBranch join_id) body) }
; return (info, code)
}
@@ -422,8 +422,8 @@ cgCase scrut bndr alt_type alts
; up_hp_usg <- getVirtHp -- Upstream heap usage
; let ret_bndrs = chooseReturnBndrs bndr alt_type alts
alt_regs = map (idToReg dflags) ret_bndrs
- simple_scrut = isSimpleScrut scrut alt_type
- do_gc | not simple_scrut = True
+ ; simple_scrut <- isSimpleScrut scrut alt_type
+ ; let do_gc | not simple_scrut = True
| isSingleton alts = False
| up_hp_usg > 0 = False
| otherwise = True
@@ -450,6 +450,13 @@ recover any unused heap before passing control to the sequel. If we
don't do this, then any unused heap will become slop because the heap
check will reset the heap usage. Slop in the heap breaks LDV profiling
(+RTS -hb) which needs to do a linear sweep through the nursery.
+
+
+Note [Inlining out-of-line primops and heap checks]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+If shouldInlinePrimOp returns True when called from StgCmmExpr for the
+purpose of heap check placement, we *must* inline the primop later in
+StgCmmPrim. If we don't things will go wrong.
-}
-----------------
@@ -460,21 +467,25 @@ maybeSaveCostCentre simple_scrut
-----------------
-isSimpleScrut :: StgExpr -> AltType -> Bool
+isSimpleScrut :: StgExpr -> AltType -> FCode Bool
-- Simple scrutinee, does not block or allocate; hence safe to amalgamate
-- heap usage from alternatives into the stuff before the case
-- NB: if you get this wrong, and claim that the expression doesn't allocate
-- when it does, you'll deeply mess up allocation
-isSimpleScrut (StgOpApp op _ _) _ = isSimpleOp op
-isSimpleScrut (StgLit _) _ = True -- case 1# of { 0# -> ..; ... }
-isSimpleScrut (StgApp _ []) (PrimAlt _) = True -- case x# of { 0# -> ..; ... }
-isSimpleScrut _ _ = False
+isSimpleScrut (StgOpApp op args _) _ = isSimpleOp op args
+isSimpleScrut (StgLit _) _ = return True -- case 1# of { 0# -> ..; ... }
+isSimpleScrut (StgApp _ []) (PrimAlt _) = return True -- case x# of { 0# -> ..; ... }
+isSimpleScrut _ _ = return False
-isSimpleOp :: StgOp -> Bool
+isSimpleOp :: StgOp -> [StgArg] -> FCode Bool
-- True iff the op cannot block or allocate
-isSimpleOp (StgFCallOp (CCall (CCallSpec _ _ safe)) _) = not (playSafe safe)
-isSimpleOp (StgPrimOp op) = not (primOpOutOfLine op)
-isSimpleOp (StgPrimCallOp _) = False
+isSimpleOp (StgFCallOp (CCall (CCallSpec _ _ safe)) _) _ = return $! not (playSafe safe)
+isSimpleOp (StgPrimOp op) stg_args = do
+ arg_exprs <- getNonVoidArgAmodes stg_args
+ dflags <- getDynFlags
+ -- See Note [Inlining out-of-line primops and heap checks]
+ return $! isJust $ shouldInlinePrimOp dflags op arg_exprs
+isSimpleOp (StgPrimCallOp _) _ = return False
-----------------
chooseReturnBndrs :: Id -> AltType -> [StgAlt] -> [NonVoid Id]
@@ -574,8 +585,8 @@ cgAlts _ _ _ _ = panic "cgAlts"
-------------------
cgAlgAltRhss :: (GcPlan,ReturnKind) -> NonVoid Id -> [StgAlt]
- -> FCode ( Maybe CmmAGraph
- , [(ConTagZ, CmmAGraph)] )
+ -> FCode ( Maybe CmmAGraphScoped
+ , [(ConTagZ, CmmAGraphScoped)] )
cgAlgAltRhss gc_plan bndr alts
= do { tagged_cmms <- cgAltRhss gc_plan bndr alts
@@ -594,14 +605,14 @@ cgAlgAltRhss gc_plan bndr alts
-------------------
cgAltRhss :: (GcPlan,ReturnKind) -> NonVoid Id -> [StgAlt]
- -> FCode [(AltCon, CmmAGraph)]
+ -> FCode [(AltCon, CmmAGraphScoped)]
cgAltRhss gc_plan bndr alts = do
dflags <- getDynFlags
let
base_reg = idToReg dflags bndr
- cg_alt :: StgAlt -> FCode (AltCon, CmmAGraph)
+ cg_alt :: StgAlt -> FCode (AltCon, CmmAGraphScoped)
cg_alt (con, bndrs, _uses, rhs)
- = getCodeR $
+ = getCodeScoped $
maybeAltHeapCheck gc_plan $
do { _ <- bindConArgs con base_reg bndrs
; _ <- cgExpr rhs
@@ -737,10 +748,16 @@ cgIdApp fun_id args = do
--
-- * Whenever we are compiling a function, we set that information to reflect
-- the fact that function currently being compiled can be jumped to, instead
--- of called. We also have to emit a label to which we will be jumping. Both
--- things are done in closureCodyBody in StgCmmBind.
+-- of called. This is done in closureCodyBody in StgCmmBind.
--
--- * When we began compilation of another closure we remove the additional
+-- * We also have to emit a label to which we will be jumping. We make sure
+-- that the label is placed after a stack check but before the heap
+-- check. The reason is that making a recursive tail-call does not increase
+-- the stack so we only need to check once. But it may grow the heap, so we
+-- have to repeat the heap check in every self-call. This is done in
+-- do_checks in StgCmmHeap.
+--
+-- * When we begin compilation of another closure we remove the additional
-- information from the environment. This is done by forkClosureBody
-- in StgCmmMonad. Other functions that duplicate the environment -
-- forkLneBody, forkAlts, codeOnly - duplicate that information. In other
@@ -755,8 +772,8 @@ cgIdApp fun_id args = do
-- arity. (d) loopification is turned on via -floopification command-line
-- option.
--
--- * Command line option to control turn loopification on and off is
--- implemented in DynFlags
+-- * Command line option to turn loopification on and off is implemented in
+-- DynFlags.
--
@@ -820,12 +837,28 @@ emitEnter fun = do
-- inlined in the RHS of the R1 assignment.
; let entry = entryCode dflags (closureInfoPtr dflags (CmmReg nodeReg))
the_call = toCall entry (Just lret) updfr_off off outArgs regs
+ ; tscope <- getTickScope
; emit $
copyout <*>
mkCbranch (cmmIsTagged dflags (CmmReg nodeReg)) lret lcall <*>
- outOfLine lcall the_call <*>
- mkLabel lret <*>
+ outOfLine lcall (the_call,tscope) <*>
+ mkLabel lret tscope <*>
copyin
; return (ReturnedTo lret off)
}
}
+
+------------------------------------------------------------------------
+-- Ticks
+------------------------------------------------------------------------
+
+cgTick :: Tickish Id -> FCode ()
+cgTick tick
+ = do { dflags <- getDynFlags
+ ; case tick of
+ ProfNote cc t p -> emitSetCCC cc t p
+ HpcTick m n -> emit (mkTickBox dflags m n)
+ SourceNote s n -> emitTick $ SourceNote s n
+ CoreNote b n -> emitTick $ CoreNote b n
+ _other -> return () -- ignore
+ }
diff --git a/compiler/codeGen/StgCmmExtCode.hs b/compiler/codeGen/StgCmmExtCode.hs
index df1733978f9f..9801829664aa 100644
--- a/compiler/codeGen/StgCmmExtCode.hs
+++ b/compiler/codeGen/StgCmmExtCode.hs
@@ -16,6 +16,9 @@ module StgCmmExtCode (
loopDecls,
getEnv,
+ withName,
+ getName,
+
newLocal,
newLabel,
newBlockId,
@@ -26,7 +29,7 @@ module StgCmmExtCode (
code,
emit, emitLabel, emitAssign, emitStore,
- getCode, getCodeR,
+ getCode, getCodeR, getCodeScoped,
emitOutOfLine,
withUpdFrameOff, getUpdFrameOff
)
@@ -57,7 +60,7 @@ data Named
= VarN CmmExpr -- ^ Holds CmmLit(CmmLabel ..) which gives the label type,
-- eg, RtsLabel, ForeignLabel, CmmLabel etc.
- | FunN PackageId -- ^ A function name from this package
+ | FunN PackageKey -- ^ A function name from this package
| LabelN BlockId -- ^ A blockid of some code or data.
-- | An environment of named things.
@@ -69,15 +72,15 @@ type Decls = [(FastString,Named)]
-- | Does a computation in the FCode monad, with a current environment
-- and a list of local declarations. Returns the resulting list of declarations.
newtype CmmParse a
- = EC { unEC :: Env -> Decls -> FCode (Decls, a) }
+ = EC { unEC :: String -> Env -> Decls -> FCode (Decls, a) }
type ExtCode = CmmParse ()
returnExtFC :: a -> CmmParse a
-returnExtFC a = EC $ \_ s -> return (s, a)
+returnExtFC a = EC $ \_ _ s -> return (s, a)
thenExtFC :: CmmParse a -> (a -> CmmParse b) -> CmmParse b
-thenExtFC (EC m) k = EC $ \e s -> do (s',r) <- m e s; unEC (k r) e s'
+thenExtFC (EC m) k = EC $ \c e s -> do (s',r) <- m c e s; unEC (k r) c e s'
instance Functor CmmParse where
fmap = liftM
@@ -91,8 +94,8 @@ instance Monad CmmParse where
return = returnExtFC
instance HasDynFlags CmmParse where
- getDynFlags = EC (\_ d -> do dflags <- getDynFlags
- return (d, dflags))
+ getDynFlags = EC (\_ _ d -> do dflags <- getDynFlags
+ return (d, dflags))
-- | Takes the variable decarations and imports from the monad
@@ -103,18 +106,25 @@ instance HasDynFlags CmmParse where
--
loopDecls :: CmmParse a -> CmmParse a
loopDecls (EC fcode) =
- EC $ \e globalDecls -> do
- (_, a) <- F.fixC (\ ~(decls, _) -> fcode (addListToUFM e decls) globalDecls)
+ EC $ \c e globalDecls -> do
+ (_, a) <- F.fixC (\ ~(decls, _) -> fcode c (addListToUFM e decls) globalDecls)
return (globalDecls, a)
-- | Get the current environment from the monad.
getEnv :: CmmParse Env
-getEnv = EC $ \e s -> return (s, e)
+getEnv = EC $ \_ e s -> return (s, e)
+
+-- | Get the current context name from the monad
+getName :: CmmParse String
+getName = EC $ \c _ s -> return (s, c)
+-- | Set context name for a sub-parse
+withName :: String -> CmmParse a -> CmmParse a
+withName c' (EC fcode) = EC $ \_ e s -> fcode c' e s
addDecl :: FastString -> Named -> ExtCode
-addDecl name named = EC $ \_ s -> return ((name, named) : s, ())
+addDecl name named = EC $ \_ _ s -> return ((name, named) : s, ())
-- | Add a new variable to the list of local declarations.
@@ -153,7 +163,7 @@ newBlockId = code F.newLabelC
-- | Add add a local function to the environment.
newFunctionName
:: FastString -- ^ name of the function
- -> PackageId -- ^ package of the current module
+ -> PackageKey -- ^ package of the current module
-> ExtCode
newFunctionName name pkg = addDecl name (FunN pkg)
@@ -193,12 +203,12 @@ lookupName name = do
case lookupUFM env name of
Just (VarN e) -> e
Just (FunN pkg) -> CmmLit (CmmLabel (mkCmmCodeLabel pkg name))
- _other -> CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId name))
+ _other -> CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageKey name))
-- | Lift an FCode computation into the CmmParse monad
code :: FCode a -> CmmParse a
-code fc = EC $ \_ s -> do
+code fc = EC $ \_ _ s -> do
r <- fc
return (s, r)
@@ -206,7 +216,7 @@ emit :: CmmAGraph -> CmmParse ()
emit = code . F.emit
emitLabel :: BlockId -> CmmParse ()
-emitLabel = code. F.emitLabel
+emitLabel = code . F.emitLabel
emitAssign :: CmmReg -> CmmExpr -> CmmParse ()
emitAssign l r = code (F.emitAssign l r)
@@ -215,21 +225,26 @@ emitStore :: CmmExpr -> CmmExpr -> CmmParse ()
emitStore l r = code (F.emitStore l r)
getCode :: CmmParse a -> CmmParse CmmAGraph
-getCode (EC ec) = EC $ \e s -> do
- ((s',_), gr) <- F.getCodeR (ec e s)
+getCode (EC ec) = EC $ \c e s -> do
+ ((s',_), gr) <- F.getCodeR (ec c e s)
return (s', gr)
getCodeR :: CmmParse a -> CmmParse (a, CmmAGraph)
-getCodeR (EC ec) = EC $ \e s -> do
- ((s', r), gr) <- F.getCodeR (ec e s)
+getCodeR (EC ec) = EC $ \c e s -> do
+ ((s', r), gr) <- F.getCodeR (ec c e s)
+ return (s', (r,gr))
+
+getCodeScoped :: CmmParse a -> CmmParse (a, CmmAGraphScoped)
+getCodeScoped (EC ec) = EC $ \c e s -> do
+ ((s', r), gr) <- F.getCodeScoped (ec c e s)
return (s', (r,gr))
-emitOutOfLine :: BlockId -> CmmAGraph -> CmmParse ()
+emitOutOfLine :: BlockId -> CmmAGraphScoped -> CmmParse ()
emitOutOfLine l g = code (F.emitOutOfLine l g)
withUpdFrameOff :: UpdFrameOffset -> CmmParse () -> CmmParse ()
withUpdFrameOff size inner
- = EC $ \e s -> F.withUpdFrameOff size $ (unEC inner) e s
+ = EC $ \c e s -> F.withUpdFrameOff size $ (unEC inner) c e s
getUpdFrameOff :: CmmParse UpdFrameOffset
getUpdFrameOff = code $ F.getUpdFrameOff
diff --git a/compiler/codeGen/StgCmmForeign.hs b/compiler/codeGen/StgCmmForeign.hs
index a688074b9ed9..0553edf85cf5 100644
--- a/compiler/codeGen/StgCmmForeign.hs
+++ b/compiler/codeGen/StgCmmForeign.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Code generation for foreign calls.
@@ -218,6 +220,7 @@ emitForeignCall safety results target args
k <- newLabelC
let (off, _, copyout) = copyInOflow dflags NativeReturn (Young k) results []
-- see Note [safe foreign call convention]
+ tscope <- getTickScope
emit $
( mkStore (CmmStackSlot (Young k) (widthInBytes (wordWidth dflags)))
(CmmLit (CmmBlock k))
@@ -228,7 +231,7 @@ emitForeignCall safety results target args
, ret_args = off
, ret_off = updfr_off
, intrbl = playInterruptible safety })
- <*> mkLabel k
+ <*> mkLabel k tscope
<*> copyout
)
return (ReturnedTo k off)
@@ -267,7 +270,12 @@ maybe_assign_temp e = do
saveThreadState :: DynFlags -> CmmAGraph
saveThreadState dflags =
-- CurrentTSO->stackobj->sp = Sp;
- mkStore (cmmOffset dflags (CmmLoad (cmmOffset dflags stgCurrentTSO (tso_stackobj dflags)) (bWord dflags)) (stack_SP dflags)) stgSp
+ let stackobj = CmmLoad (cmmOffset dflags stgCurrentTSO (tso_stackobj dflags)) (bWord dflags)
+ sp_fld = cmmOffset dflags stackobj (stack_SP dflags)
+ in (if gopt Opt_Debug dflags
+ then mkUnwind Sp (CmmLoad sp_fld (bWord dflags))
+ else mkNop)
+ <*> mkStore sp_fld stgSp
<*> closeNursery dflags
-- and save the current cost centre stack in the TSO when profiling:
<*> if gopt Opt_SccProfilingOn dflags then
@@ -358,7 +366,7 @@ stack_SP dflags = closureField dflags (oFFSET_StgStack_sp dflags)
closureField :: DynFlags -> ByteOff -> ByteOff
-closureField dflags off = off + fixedHdrSize dflags * wORD_SIZE dflags
+closureField dflags off = off + fixedHdrSize dflags
stgSp, stgHp, stgCurrentTSO, stgCurrentNursery :: CmmExpr
stgSp = CmmReg sp
@@ -405,6 +413,9 @@ add_shim dflags arg_ty expr
| tycon == arrayPrimTyCon || tycon == mutableArrayPrimTyCon
= cmmOffsetB dflags expr (arrPtrsHdrSize dflags)
+ | tycon == smallArrayPrimTyCon || tycon == smallMutableArrayPrimTyCon
+ = cmmOffsetB dflags expr (smallArrPtrsHdrSize dflags)
+
| tycon == byteArrayPrimTyCon || tycon == mutableByteArrayPrimTyCon
= cmmOffsetB dflags expr (arrWordsHdrSize dflags)
diff --git a/compiler/codeGen/StgCmmHeap.hs b/compiler/codeGen/StgCmmHeap.hs
index 55ddfd4f9694..2261b1996393 100644
--- a/compiler/codeGen/StgCmmHeap.hs
+++ b/compiler/codeGen/StgCmmHeap.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Stg to C--: heap management functions
@@ -8,16 +10,15 @@
module StgCmmHeap (
getVirtHp, setVirtHp, setRealHp,
- getHpRelOffset, hpRel,
+ getHpRelOffset,
entryHeapCheck, altHeapCheck, noEscapeHeapCheck, altHeapCheckReturnsTo,
heapStackCheckGen,
entryHeapCheck',
- mkVirtHeapOffsets, mkVirtConstrOffsets,
mkStaticClosureFields, mkStaticClosure,
- allocDynClosure, allocDynClosureCmm,
+ allocDynClosure, allocDynClosureCmm, allocHeapClosure,
emitSetDynHdr
) where
@@ -68,7 +69,7 @@ allocDynClosure
allocDynClosureCmm
:: Maybe Id -> CmmInfoTable -> LambdaFormInfo -> CmmExpr -> CmmExpr
- -> [(CmmExpr, VirtualHpOffset)]
+ -> [(CmmExpr, ByteOff)]
-> FCode CmmExpr -- returns Hp+n
-- allocDynClosure allocates the thing in the heap,
@@ -89,61 +90,70 @@ allocDynClosureCmm
-- significant - see test T4801.
-allocDynClosure mb_id info_tbl lf_info use_cc _blame_cc args_w_offsets
- = do { let (args, offsets) = unzip args_w_offsets
- ; cmm_args <- mapM getArgAmode args -- No void args
- ; allocDynClosureCmm mb_id info_tbl lf_info
- use_cc _blame_cc (zip cmm_args offsets)
- }
+allocDynClosure mb_id info_tbl lf_info use_cc _blame_cc args_w_offsets = do
+ let (args, offsets) = unzip args_w_offsets
+ cmm_args <- mapM getArgAmode args -- No void args
+ allocDynClosureCmm mb_id info_tbl lf_info
+ use_cc _blame_cc (zip cmm_args offsets)
+
-allocDynClosureCmm mb_id info_tbl lf_info use_cc _blame_cc amodes_w_offsets
- = do { virt_hp <- getVirtHp
+allocDynClosureCmm mb_id info_tbl lf_info use_cc _blame_cc amodes_w_offsets = do
+ -- SAY WHAT WE ARE ABOUT TO DO
+ let rep = cit_rep info_tbl
+ tickyDynAlloc mb_id rep lf_info
+ let info_ptr = CmmLit (CmmLabel (cit_lbl info_tbl))
+ allocHeapClosure rep info_ptr use_cc amodes_w_offsets
- -- SAY WHAT WE ARE ABOUT TO DO
- ; let rep = cit_rep info_tbl
- ; tickyDynAlloc mb_id rep lf_info
- ; profDynAlloc rep use_cc
- -- FIND THE OFFSET OF THE INFO-PTR WORD
- ; let info_offset = virt_hp + 1
- -- info_offset is the VirtualHpOffset of the first
- -- word of the new object
- -- Remember, virtHp points to last allocated word,
- -- ie 1 *before* the info-ptr word of new object.
+-- | Low-level heap object allocation.
+allocHeapClosure
+ :: SMRep -- ^ representation of the object
+ -> CmmExpr -- ^ info pointer
+ -> CmmExpr -- ^ cost centre
+ -> [(CmmExpr,ByteOff)] -- ^ payload
+ -> FCode CmmExpr -- ^ returns the address of the object
+allocHeapClosure rep info_ptr use_cc payload = do
+ profDynAlloc rep use_cc
- info_ptr = CmmLit (CmmLabel (cit_lbl info_tbl))
+ virt_hp <- getVirtHp
- -- ALLOCATE THE OBJECT
- ; base <- getHpRelOffset info_offset
- ; emitComment $ mkFastString "allocDynClosure"
- ; emitSetDynHdr base info_ptr use_cc
- ; let (cmm_args, offsets) = unzip amodes_w_offsets
- ; hpStore base cmm_args offsets
+ -- Find the offset of the info-ptr word
+ let info_offset = virt_hp + 1
+ -- info_offset is the VirtualHpOffset of the first
+ -- word of the new object
+ -- Remember, virtHp points to last allocated word,
+ -- ie 1 *before* the info-ptr word of new object.
- -- BUMP THE VIRTUAL HEAP POINTER
- ; dflags <- getDynFlags
- ; setVirtHp (virt_hp + heapClosureSize dflags rep)
+ base <- getHpRelOffset info_offset
+ emitComment $ mkFastString "allocHeapClosure"
+ emitSetDynHdr base info_ptr use_cc
+
+ -- Fill in the fields
+ hpStore base payload
+
+ -- Bump the virtual heap pointer
+ dflags <- getDynFlags
+ setVirtHp (virt_hp + heapClosureSizeW dflags rep)
+
+ return base
- ; getHpRelOffset info_offset
- }
emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
emitSetDynHdr base info_ptr ccs
= do dflags <- getDynFlags
- hpStore base (header dflags) [0..]
+ hpStore base (zip (header dflags) [0, wORD_SIZE dflags ..])
where
header :: DynFlags -> [CmmExpr]
header dflags = [info_ptr] ++ dynProfHdr dflags ccs
-- ToDof: Parallel stuff
-- No ticky header
-hpStore :: CmmExpr -> [CmmExpr] -> [VirtualHpOffset] -> FCode ()
-- Store the item (expr,off) in base[off]
-hpStore base vals offs
- = do dflags <- getDynFlags
- let mk_store val off = mkStore (cmmOffsetW dflags base off) val
- emit (catAGraphs (zipWith mk_store vals offs))
-
+hpStore :: CmmExpr -> [(CmmExpr, ByteOff)] -> FCode ()
+hpStore base vals = do
+ dflags <- getDynFlags
+ sequence_ $
+ [ emitStore (cmmOffsetB dflags base off) val | (val,off) <- vals ]
-----------------------------------------------------------
-- Layout of static closures
@@ -406,7 +416,8 @@ altOrNoEscapeHeapCheck checkYield regs code = do
lret <- newLabelC
let (off, _, copyin) = copyInOflow dflags NativeReturn (Young lret) regs []
lcont <- newLabelC
- emitOutOfLine lret (copyin <*> mkBranch lcont)
+ tscope <- getTickScope
+ emitOutOfLine lret (copyin <*> mkBranch lcont, tscope)
emitLabel lcont
cannedGCReturnsTo checkYield False gc regs lret off code
@@ -506,7 +517,7 @@ generic_gc = mkGcLabel "stg_gc_noregs"
-- | Create a CLabel for calling a garbage collector entry point
mkGcLabel :: String -> CmmExpr
-mkGcLabel s = CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit s)))
+mkGcLabel s = CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageKey (fsLit s)))
-------------------------------
heapCheck :: Bool -> Bool -> CmmAGraph -> FCode a -> FCode a
@@ -531,7 +542,7 @@ heapStackCheckGen stk_hwm mb_bytes
lretry <- newLabelC
emitLabel lretry
call <- mkCall generic_gc (GC, GC) [] [] updfr_sz []
- do_checks stk_hwm False mb_bytes (call <*> mkBranch lretry)
+ do_checks stk_hwm False mb_bytes (call <*> mkBranch lretry)
-- Note [Single stack check]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -569,11 +580,11 @@ heapStackCheckGen stk_hwm mb_bytes
-- number of bytes of stack that the function will use, so we use a
-- special late-bound CmmLit, namely
-- CmmHighStackMark
--- to stand for the number of bytes needed. When the stack is made
+-- to stand for the number of bytes needed. When the stack is made
-- manifest, the number of bytes needed is calculated, and used to
-- replace occurrences of CmmHighStackMark
--
--- The (Maybe CmmExpr) passed to do_checks is usually
+-- The (Maybe CmmExpr) passed to do_checks is usually
-- Just (CmmLit CmmHighStackMark)
-- but can also (in certain hand-written RTS functions)
-- Just (CmmLit 8) or some other fixed valuet
@@ -615,21 +626,31 @@ do_checks mb_stk_hwm checkYield mb_alloc_lit do_gc = do
Nothing -> return ()
Just stk_hwm -> tickyStackCheck >> (emit =<< mkCmmIfGoto (sp_oflo stk_hwm) gc_id)
+ -- Emit new label that might potentially be a header
+ -- of a self-recursive tail call.
+ -- See Note [Self-recursive loop header].
+ self_loop_info <- getSelfLoop
+ case self_loop_info of
+ Just (_, loop_header_id, _)
+ | checkYield && isJust mb_stk_hwm -> emitLabel loop_header_id
+ _otherwise -> return ()
+
if (isJust mb_alloc_lit)
then do
tickyHeapCheck
emitAssign hpReg bump_hp
emit =<< mkCmmIfThen hp_oflo (alloc_n <*> mkBranch gc_id)
else do
- when (not (gopt Opt_OmitYields dflags) && checkYield) $ do
+ when (checkYield && not (gopt Opt_OmitYields dflags)) $ do
-- Yielding if HpLim == 0
let yielding = CmmMachOp (mo_wordEq dflags)
[CmmReg (CmmGlobal HpLim),
CmmLit (zeroCLit dflags)]
emit =<< mkCmmIfGoto yielding gc_id
- emitOutOfLine gc_id $
- do_gc -- this is expected to jump back somewhere
+ tscope <- getTickScope
+ emitOutOfLine gc_id
+ (do_gc, tscope) -- this is expected to jump back somewhere
-- Test for stack pointer exhaustion, then
-- bump heap pointer, and test for heap exhaustion
@@ -637,3 +658,27 @@ do_checks mb_stk_hwm checkYield mb_alloc_lit do_gc = do
-- stack check succeeds. Otherwise we might end up
-- with slop at the end of the current block, which can
-- confuse the LDV profiler.
+
+-- Note [Self-recursive loop header]
+-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--
+-- Self-recursive loop header is required by loopification optimization (See
+-- Note [Self-recursive tail calls] in StgCmmExpr). We emit it if:
+--
+-- 1. There is information about self-loop in the FCode environment. We don't
+-- check the binder (first component of the self_loop_info) because we are
+-- certain that if the self-loop info is present then we are compiling the
+-- binder body. Reason: the only possible way to get here with the
+-- self_loop_info present is from closureCodeBody.
+--
+-- 2. checkYield && isJust mb_stk_hwm. checkYield tells us that it is possible
+-- to preempt the heap check (see #367 for motivation behind this check). It
+-- is True for heap checks placed at the entry to a function and
+-- let-no-escape heap checks but false for other heap checks (eg. in case
+-- alternatives or created from hand-written high-level Cmm). The second
+-- check (isJust mb_stk_hwm) is true for heap checks at the entry to a
+-- function and some heap checks created in hand-written Cmm. Otherwise it
+-- is Nothing. In other words the only situation when both conditions are
+-- true is when compiling stack and heap checks at the entry to a
+-- function. This is the only situation when we want to emit a self-loop
+-- label.
diff --git a/compiler/codeGen/StgCmmLayout.hs b/compiler/codeGen/StgCmmLayout.hs
index 54e2e920f91f..2453639a8aaf 100644
--- a/compiler/codeGen/StgCmmLayout.hs
+++ b/compiler/codeGen/StgCmmLayout.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Building info tables.
@@ -15,7 +17,7 @@ module StgCmmLayout (
slowCall, directCall,
- mkVirtHeapOffsets, mkVirtConstrOffsets, getHpRelOffset, hpRel,
+ mkVirtHeapOffsets, mkVirtConstrOffsets, getHpRelOffset,
ArgRep(..), toArgRep, argRepSizeW -- re-exported from StgCmmArgRep
) where
@@ -114,7 +116,8 @@ emitCallWithExtraStack (callConv, retConv) fun args extra_stack
(off, _, copyin) = copyInOflow dflags retConv area res_regs []
copyout = mkCallReturnsTo dflags fun callConv args k off updfr_off
extra_stack
- emit (copyout <*> mkLabel k <*> copyin)
+ tscope <- getTickScope
+ emit (copyout <*> mkLabel k tscope <*> copyin)
return (ReturnedTo k off)
}
@@ -218,15 +221,16 @@ slowCall fun stg_args
let correct_arity = cmmEqWord dflags (funInfoArity dflags fun_iptr)
(mkIntExpr dflags n_args)
+ tscope <- getTickScope
emit (mkCbranch (cmmIsTagged dflags funv) is_tagged_lbl slow_lbl
- <*> mkLabel is_tagged_lbl
+ <*> mkLabel is_tagged_lbl tscope
<*> mkCbranch correct_arity fast_lbl slow_lbl
- <*> mkLabel fast_lbl
+ <*> mkLabel fast_lbl tscope
<*> fast_code
<*> mkBranch end_lbl
- <*> mkLabel slow_lbl
+ <*> mkLabel slow_lbl tscope
<*> slow_code
- <*> mkLabel end_lbl)
+ <*> mkLabel end_lbl tscope)
return r
else do
@@ -357,22 +361,23 @@ slowArgs dflags args -- careful: reps contains voids (V), but args does not
(arg_pat, n) = slowCallPattern (map fst args)
(call_args, rest_args) = splitAt n args
- stg_ap_pat = mkCmmRetInfoLabel rtsPackageId arg_pat
+ stg_ap_pat = mkCmmRetInfoLabel rtsPackageKey arg_pat
this_pat = (N, Just (mkLblExpr stg_ap_pat)) : call_args
save_cccs = [(N, Just (mkLblExpr save_cccs_lbl)), (N, Just curCCS)]
- save_cccs_lbl = mkCmmRetInfoLabel rtsPackageId (fsLit "stg_restore_cccs")
+ save_cccs_lbl = mkCmmRetInfoLabel rtsPackageKey (fsLit "stg_restore_cccs")
-------------------------------------------------------------------------
---- Laying out objects on the heap and stack
-------------------------------------------------------------------------
--- The heap always grows upwards, so hpRel is easy
+-- The heap always grows upwards, so hpRel is easy to compute
hpRel :: VirtualHpOffset -- virtual offset of Hp
-> VirtualHpOffset -- virtual offset of The Thing
-> WordOff -- integer word offset
hpRel hp off = off - hp
getHpRelOffset :: VirtualHpOffset -> FCode CmmExpr
+-- See Note [Virtual and real heap pointers] in StgCmmMonad
getHpRelOffset virtual_offset
= do dflags <- getDynFlags
hp_usg <- getHpUsage
@@ -384,7 +389,7 @@ mkVirtHeapOffsets
-> [(PrimRep,a)] -- Things to make offsets for
-> (WordOff, -- _Total_ number of words allocated
WordOff, -- Number of words allocated for *pointers*
- [(NonVoid a, VirtualHpOffset)])
+ [(NonVoid a, ByteOff)])
-- Things with their offsets from start of object in order of
-- increasing offset; BUT THIS MAY BE DIFFERENT TO INPUT ORDER
@@ -397,22 +402,31 @@ mkVirtHeapOffsets
-- than the unboxed things
mkVirtHeapOffsets dflags is_thunk things
- = let non_void_things = filterOut (isVoidRep . fst) things
- (ptrs, non_ptrs) = partition (isGcPtrRep . fst) non_void_things
- (wds_of_ptrs, ptrs_w_offsets) = mapAccumL computeOffset 0 ptrs
- (tot_wds, non_ptrs_w_offsets) = mapAccumL computeOffset wds_of_ptrs non_ptrs
- in
- (tot_wds, wds_of_ptrs, ptrs_w_offsets ++ non_ptrs_w_offsets)
+ = ( bytesToWordsRoundUp dflags tot_bytes
+ , bytesToWordsRoundUp dflags bytes_of_ptrs
+ , ptrs_w_offsets ++ non_ptrs_w_offsets
+ )
where
- hdr_size | is_thunk = thunkHdrSize dflags
- | otherwise = fixedHdrSize dflags
-
- computeOffset wds_so_far (rep, thing)
- = (wds_so_far + argRepSizeW dflags (toArgRep rep),
- (NonVoid thing, hdr_size + wds_so_far))
-
-mkVirtConstrOffsets :: DynFlags -> [(PrimRep,a)] -> (WordOff, WordOff, [(NonVoid a, VirtualHpOffset)])
--- Just like mkVirtHeapOffsets, but for constructors
+ hdr_words | is_thunk = thunkHdrSize dflags
+ | otherwise = fixedHdrSizeW dflags
+ hdr_bytes = wordsToBytes dflags hdr_words
+
+ non_void_things = filterOut (isVoidRep . fst) things
+ (ptrs, non_ptrs) = partition (isGcPtrRep . fst) non_void_things
+
+ (bytes_of_ptrs, ptrs_w_offsets) =
+ mapAccumL computeOffset 0 ptrs
+ (tot_bytes, non_ptrs_w_offsets) =
+ mapAccumL computeOffset bytes_of_ptrs non_ptrs
+
+ computeOffset bytes_so_far (rep, thing)
+ = (bytes_so_far + wordsToBytes dflags (argRepSizeW dflags (toArgRep rep)),
+ (NonVoid thing, hdr_bytes + bytes_so_far))
+
+-- | Just like mkVirtHeapOffsets, but for constructors
+mkVirtConstrOffsets
+ :: DynFlags -> [(PrimRep,a)]
+ -> (WordOff, WordOff, [(NonVoid a, ByteOff)])
mkVirtConstrOffsets dflags = mkVirtHeapOffsets dflags False
@@ -520,7 +534,7 @@ emitClosureProcAndInfoTable top_lvl bndr lf_info info_tbl args body
emitClosureAndInfoTable ::
CmmInfoTable -> Convention -> [LocalReg] -> FCode () -> FCode ()
emitClosureAndInfoTable info_tbl conv args body
- = do { blks <- getCode body
+ = do { (_, blks) <- getCodeScoped body
; let entry_lbl = toEntryLbl (cit_lbl info_tbl)
; emitProcWithConvention conv (Just info_tbl) entry_lbl args blks
}
diff --git a/compiler/codeGen/StgCmmMonad.hs b/compiler/codeGen/StgCmmMonad.hs
index 3d82e6940228..de7c7d85acd7 100644
--- a/compiler/codeGen/StgCmmMonad.hs
+++ b/compiler/codeGen/StgCmmMonad.hs
@@ -1,4 +1,5 @@
-{-# LANGUAGE GADTs #-}
+{-# LANGUAGE CPP, GADTs, UnboxedTuples #-}
+
-----------------------------------------------------------------------------
--
-- Monad for Stg to C-- code generation
@@ -19,9 +20,10 @@ module StgCmmMonad (
emit, emitDecl, emitProc,
emitProcWithConvention, emitProcWithStackFrame,
emitOutOfLine, emitAssign, emitStore, emitComment,
+ emitTick, emitUnwind,
getCmm, aGraphToGraph,
- getCodeR, getCode, getHeapUsage,
+ getCodeR, getCode, getCodeScoped, getHeapUsage,
mkCmmIfThenElse, mkCmmIfThen, mkCmmIfGoto,
mkCall, mkCmmCall,
@@ -34,6 +36,7 @@ module StgCmmMonad (
withSequel, getSequel,
setTickyCtrLabel, getTickyCtrLabel,
+ tickScope, getTickScope,
withUpdFrameOff, getUpdFrameOff, initUpdFrameOff,
@@ -73,11 +76,13 @@ import Unique
import UniqSupply
import FastString
import Outputable
+import CoreSyn (RawTickish)
import qualified Control.Applicative as A
import Control.Monad
import Data.List
import Prelude hiding( sequence, succ )
+import Control.Arrow ( first )
infixr 9 `thenC` -- Right-associative!
infixr 9 `thenFC`
@@ -179,10 +184,11 @@ data CgInfoDownwards -- information only passed *downwards* by the monad
cgd_updfr_off :: UpdFrameOffset, -- Size of current update frame
cgd_ticky :: CLabel, -- Current destination for ticky counts
cgd_sequel :: Sequel, -- What to do at end of basic block
- cgd_self_loop :: Maybe SelfLoopInfo -- Which tail calls can be compiled
+ cgd_self_loop :: Maybe SelfLoopInfo,-- Which tail calls can be compiled
-- as local jumps? See Note
-- [Self-recursive tail calls] in
-- StgCmmExpr
+ cgd_tick_scope:: [CmmTickScope] -- Contexts ticks should be added to
}
type CgBindings = IdEnv CgIdInfo
@@ -303,7 +309,8 @@ initCgInfoDown dflags mod
, cgd_updfr_off = initUpdFrameOff dflags
, cgd_ticky = mkTopTickyCtrLabel
, cgd_sequel = initSequel
- , cgd_self_loop = Nothing }
+ , cgd_self_loop = Nothing
+ , cgd_tick_scope= [] }
initSequel :: Sequel
initSequel = Return False
@@ -331,17 +338,47 @@ data CgState
cgs_uniqs :: UniqSupply }
-data HeapUsage =
- HeapUsage {
+data HeapUsage -- See Note [Virtual and real heap pointers]
+ = HeapUsage {
virtHp :: VirtualHpOffset, -- Virtual offset of highest-allocated word
-- Incremented whenever we allocate
realHp :: VirtualHpOffset -- realHp: Virtual offset of real heap ptr
-- Used in instruction addressing modes
- }
+ }
type VirtualHpOffset = WordOff
+{- Note [Virtual and real heap pointers]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The code generator can allocate one or more objects contiguously, performing
+one heap check to cover allocation of all the objects at once. Let's call
+this little chunk of heap space an "allocation chunk". The code generator
+will emit code to
+ * Perform a heap-exhaustion check
+ * Move the heap pointer to the end of the allocation chunk
+ * Allocate multiple objects within the chunk
+
+The code generator uses VirtualHpOffsets to address words within a
+single allocation chunk; these start at one and increase positively.
+The first word of the chunk has VirtualHpOffset=1, the second has
+VirtualHpOffset=2, and so on.
+
+ * The field realHp tracks (the VirtualHpOffset) where the real Hp
+ register is pointing. Typically it'll be pointing to the end of the
+ allocation chunk.
+
+ * The field virtHp gives the VirtualHpOffset of the highest-allocated
+ word so far. It starts at zero (meaning no word has been allocated),
+ and increases whenever an object is allocated.
+
+The difference between realHp and virtHp gives the offset from the
+real Hp register of a particular word in the allocation chunk. This
+is what getHpRelOffset does. Since the returned offset is relative
+to the real Hp register, it is valid only until you change the real
+Hp register. (Changing virtHp doesn't matter.)
+-}
+
initCgState :: UniqSupply -> CgState
initCgState uniqs
@@ -463,7 +500,7 @@ withSelfLoop self_loop code = do
instance HasDynFlags FCode where
getDynFlags = liftM cgd_dflags getInfoDown
-getThisPackage :: FCode PackageId
+getThisPackage :: FCode PackageKey
getThisPackage = liftM thisPackage getDynFlags
withInfoDown :: FCode a -> CgInfoDownwards -> FCode a
@@ -525,6 +562,20 @@ setTickyCtrLabel ticky code = do
info <- getInfoDown
withInfoDown code (info {cgd_ticky = ticky})
+-- ----------------------------------------------------------------------------
+-- Manage tick scopes
+
+getTickScope :: FCode [CmmTickScope]
+getTickScope = do
+ info <- getInfoDown
+ return (cgd_tick_scope info)
+
+tickScope :: FCode a -> FCode a
+tickScope code = do
+ scope <- newUnique
+ info <- getInfoDown
+ withInfoDown code $ info { cgd_tick_scope = scope : cgd_tick_scope info }
+
--------------------------------------------------------
-- Forking
@@ -613,6 +664,20 @@ getCodeR fcode
getCode :: FCode a -> FCode CmmAGraph
getCode fcode = do { (_,stmts) <- getCodeR fcode; return stmts }
+-- | Generate code into a fresh tick scope and gather generated code
+getCodeScoped :: FCode a -> FCode (a, CmmAGraphScoped)
+getCodeScoped fcode
+ = do { state1 <- getState
+ ; ((a, tscope), state2) <-
+ tickScope $
+ flip withState state1 { cgs_stmts = mkNop } $
+ do { a <- fcode
+ ; scp <- getTickScope
+ ; return (a, scp) }
+ ; setState $ state2 { cgs_stmts = cgs_stmts state1 }
+ ; return (a, (cgs_stmts state2, tscope)) }
+
+
-- 'getHeapUsage' applies a function to the amount of heap that it uses.
-- It initialises the heap usage to zeros, and passes on an unchanged
-- heap usage.
@@ -643,7 +708,8 @@ emitCgStmt stmt
}
emitLabel :: BlockId -> FCode ()
-emitLabel id = emitCgStmt (CgLabel id)
+emitLabel id = do tscope <- getTickScope
+ emitCgStmt (CgLabel id tscope)
emitComment :: FastString -> FCode ()
#if 0 /* def DEBUG */
@@ -652,6 +718,15 @@ emitComment s = emitCgStmt (CgStmt (CmmComment s))
emitComment _ = return ()
#endif
+emitTick :: RawTickish -> FCode ()
+emitTick = emitCgStmt . CgStmt . CmmTick
+
+emitUnwind :: GlobalReg -> CmmExpr -> FCode ()
+emitUnwind g e = do
+ dflags <- getDynFlags
+ when (gopt Opt_Debug dflags) $
+ emitCgStmt $ CgStmt $ CmmUnwind g e
+
emitAssign :: CmmReg -> CmmExpr -> FCode ()
emitAssign l r = emitCgStmt (CgStmt (CmmAssign l r))
@@ -673,7 +748,7 @@ emitDecl decl
= do { state <- getState
; setState $ state { cgs_tops = cgs_tops state `snocOL` decl } }
-emitOutOfLine :: BlockId -> CmmAGraph -> FCode ()
+emitOutOfLine :: BlockId -> CmmAGraphScoped -> FCode ()
emitOutOfLine l stmts = emitCgStmt (CgFork l stmts)
emitProcWithStackFrame
@@ -682,7 +757,7 @@ emitProcWithStackFrame
-> CLabel -- label for the proc
-> [CmmFormal] -- stack frame
-> [CmmFormal] -- arguments
- -> CmmAGraph -- code
+ -> CmmAGraphScoped -- code
-> Bool -- do stack layout?
-> FCode ()
@@ -693,22 +768,22 @@ emitProcWithStackFrame _conv mb_info lbl _stk_args [] blocks False
emitProcWithStackFrame conv mb_info lbl stk_args args blocks True -- do layout
= do { dflags <- getDynFlags
; let (offset, live, entry) = mkCallEntry dflags conv args stk_args
- ; emitProc_ mb_info lbl live (entry <*> blocks) offset True
+ ; emitProc_ mb_info lbl live (first (entry <*>) blocks) offset True
}
emitProcWithStackFrame _ _ _ _ _ _ _ = panic "emitProcWithStackFrame"
emitProcWithConvention :: Convention -> Maybe CmmInfoTable -> CLabel
-> [CmmFormal]
- -> CmmAGraph
+ -> CmmAGraphScoped
-> FCode ()
emitProcWithConvention conv mb_info lbl args blocks
= emitProcWithStackFrame conv mb_info lbl [] args blocks True
-emitProc :: Maybe CmmInfoTable -> CLabel -> [GlobalReg] -> CmmAGraph -> Int -> FCode ()
+emitProc :: Maybe CmmInfoTable -> CLabel -> [GlobalReg] -> CmmAGraphScoped -> Int -> FCode ()
emitProc mb_info lbl live blocks offset
= emitProc_ mb_info lbl live blocks offset True
-emitProc_ :: Maybe CmmInfoTable -> CLabel -> [GlobalReg] -> CmmAGraph -> Int -> Bool
+emitProc_ :: Maybe CmmInfoTable -> CLabel -> [GlobalReg] -> CmmAGraphScoped -> Int -> Bool
-> FCode ()
emitProc_ mb_info lbl live blocks offset do_layout
= do { dflags <- getDynFlags
@@ -744,24 +819,27 @@ getCmm code
mkCmmIfThenElse :: CmmExpr -> CmmAGraph -> CmmAGraph -> FCode CmmAGraph
mkCmmIfThenElse e tbranch fbranch = do
+ tscp <- getTickScope
endif <- newLabelC
tid <- newLabelC
fid <- newLabelC
return $ mkCbranch e tid fid <*>
- mkLabel tid <*> tbranch <*> mkBranch endif <*>
- mkLabel fid <*> fbranch <*> mkLabel endif
+ mkLabel tid tscp <*> tbranch <*> mkBranch endif <*>
+ mkLabel fid tscp <*> fbranch <*> mkLabel endif tscp
mkCmmIfGoto :: CmmExpr -> BlockId -> FCode CmmAGraph
mkCmmIfGoto e tid = do
endif <- newLabelC
- return $ mkCbranch e tid endif <*> mkLabel endif
+ tscp <- getTickScope
+ return $ mkCbranch e tid endif <*> mkLabel endif tscp
mkCmmIfThen :: CmmExpr -> CmmAGraph -> FCode CmmAGraph
mkCmmIfThen e tbranch = do
endif <- newLabelC
tid <- newLabelC
+ tscp <- getTickScope
return $ mkCbranch e tid endif <*>
- mkLabel tid <*> tbranch <*> mkLabel endif
+ mkLabel tid tscp <*> tbranch <*> mkLabel endif tscp
mkCall :: CmmExpr -> (Convention, Convention) -> [CmmFormal] -> [CmmActual]
@@ -769,10 +847,11 @@ mkCall :: CmmExpr -> (Convention, Convention) -> [CmmFormal] -> [CmmActual]
mkCall f (callConv, retConv) results actuals updfr_off extra_stack = do
dflags <- getDynFlags
k <- newLabelC
+ tscp <- getTickScope
let area = Young k
(off, _, copyin) = copyInOflow dflags retConv area results []
copyout = mkCallReturnsTo dflags f callConv actuals k off updfr_off extra_stack
- return (copyout <*> mkLabel k <*> copyin)
+ return (copyout <*> mkLabel k tscp <*> copyin)
mkCmmCall :: CmmExpr -> [CmmFormal] -> [CmmActual] -> UpdFrameOffset
-> FCode CmmAGraph
@@ -783,7 +862,7 @@ mkCmmCall f results actuals updfr_off
-- ----------------------------------------------------------------------------
-- turn CmmAGraph into CmmGraph, for making a new proc.
-aGraphToGraph :: CmmAGraph -> FCode CmmGraph
+aGraphToGraph :: CmmAGraphScoped -> FCode CmmGraph
aGraphToGraph stmts
= do { l <- newLabelC
; return (labelAGraph l stmts) }
diff --git a/compiler/codeGen/StgCmmPrim.hs b/compiler/codeGen/StgCmmPrim.hs
index 6411e89a5484..9e1242735554 100644
--- a/compiler/codeGen/StgCmmPrim.hs
+++ b/compiler/codeGen/StgCmmPrim.hs
@@ -1,4 +1,6 @@
------------------------------------------------------------------------------
+{-# LANGUAGE CPP #-}
+
+----------------------------------------------------------------------------
--
-- Stg to C--: primitive operations
--
@@ -8,8 +10,9 @@
module StgCmmPrim (
cgOpApp,
- cgPrimOp -- internal(ish), used by cgCase to get code for a
- -- comparison without also turning it into a Bool.
+ cgPrimOp, -- internal(ish), used by cgCase to get code for a
+ -- comparison without also turning it into a Bool.
+ shouldInlinePrimOp
) where
#include "HsVersions.h"
@@ -40,8 +43,8 @@ import FastString
import Outputable
import Util
+import Data.Bits ((.&.), bit)
import Control.Monad (liftM, when)
-import Data.Bits
------------------------------------------------------------------------
-- Primitive operations and foreign calls
@@ -86,36 +89,163 @@ cgOpApp (StgPrimOp TagToEnumOp) [arg] res_ty
-- That won't work.
tycon = tyConAppTyCon res_ty
-cgOpApp (StgPrimOp primop) args res_ty
- | primOpOutOfLine primop
- = do { cmm_args <- getNonVoidArgAmodes args
- ; let fun = CmmLit (CmmLabel (mkRtsPrimOpLabel primop))
- ; emitCall (NativeNodeCall, NativeReturn) fun cmm_args }
-
- | ReturnsPrim VoidRep <- result_info
- = do cgPrimOp [] primop args
- emitReturn []
-
- | ReturnsPrim rep <- result_info
- = do dflags <- getDynFlags
- res <- newTemp (primRepCmmType dflags rep)
- cgPrimOp [res] primop args
- emitReturn [CmmReg (CmmLocal res)]
-
- | ReturnsAlg tycon <- result_info, isUnboxedTupleTyCon tycon
- = do (regs, _hints) <- newUnboxedTupleRegs res_ty
- cgPrimOp regs primop args
- emitReturn (map (CmmReg . CmmLocal) regs)
-
- | otherwise = panic "cgPrimop"
- where
- result_info = getPrimOpResultInfo primop
+cgOpApp (StgPrimOp primop) args res_ty = do
+ dflags <- getDynFlags
+ cmm_args <- getNonVoidArgAmodes args
+ case shouldInlinePrimOp dflags primop cmm_args of
+ Nothing -> do -- out-of-line
+ let fun = CmmLit (CmmLabel (mkRtsPrimOpLabel primop))
+ emitCall (NativeNodeCall, NativeReturn) fun cmm_args
+
+ Just f -- inline
+ | ReturnsPrim VoidRep <- result_info
+ -> do f []
+ emitReturn []
+
+ | ReturnsPrim rep <- result_info
+ -> do dflags <- getDynFlags
+ res <- newTemp (primRepCmmType dflags rep)
+ f [res]
+ emitReturn [CmmReg (CmmLocal res)]
+
+ | ReturnsAlg tycon <- result_info, isUnboxedTupleTyCon tycon
+ -> do (regs, _hints) <- newUnboxedTupleRegs res_ty
+ f regs
+ emitReturn (map (CmmReg . CmmLocal) regs)
+
+ | otherwise -> panic "cgPrimop"
+ where
+ result_info = getPrimOpResultInfo primop
cgOpApp (StgPrimCallOp primcall) args _res_ty
= do { cmm_args <- getNonVoidArgAmodes args
; let fun = CmmLit (CmmLabel (mkPrimCallLabel primcall))
; emitCall (NativeNodeCall, NativeReturn) fun cmm_args }
+-- | Interpret the argument as an unsigned value, assuming the value
+-- is given in two-complement form in the given width.
+--
+-- Example: @asUnsigned W64 (-1)@ is 18446744073709551615.
+--
+-- This function is used to work around the fact that many array
+-- primops take Int# arguments, but we interpret them as unsigned
+-- quantities in the code gen. This means that we have to be careful
+-- every time we work on e.g. a CmmInt literal that corresponds to the
+-- array size, as it might contain a negative Integer value if the
+-- user passed a value larger than 2^(wORD_SIZE_IN_BITS-1) as the Int#
+-- literal.
+asUnsigned :: Width -> Integer -> Integer
+asUnsigned w n = n .&. (bit (widthInBits w) - 1)
+
+-- TODO: Several primop implementations (e.g. 'doNewByteArrayOp') use
+-- ByteOff (or some other fixed width signed type) to represent
+-- array sizes or indices. This means that these will overflow for
+-- large enough sizes.
+
+-- | Decide whether an out-of-line primop should be replaced by an
+-- inline implementation. This might happen e.g. if there's enough
+-- static information, such as statically know arguments, to emit a
+-- more efficient implementation inline.
+--
+-- Returns 'Nothing' if this primop should use its out-of-line
+-- implementation (defined elsewhere) and 'Just' together with a code
+-- generating function that takes the output regs as arguments
+-- otherwise.
+shouldInlinePrimOp :: DynFlags
+ -> PrimOp -- ^ The primop
+ -> [CmmExpr] -- ^ The primop arguments
+ -> Maybe ([LocalReg] -> FCode ())
+
+shouldInlinePrimOp dflags NewByteArrayOp_Char [(CmmLit (CmmInt n w))]
+ | asUnsigned w n <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] -> doNewByteArrayOp res (fromInteger n)
+
+shouldInlinePrimOp dflags NewArrayOp [(CmmLit (CmmInt n w)), init]
+ | wordsToBytes dflags (asUnsigned w n) <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] ->
+ doNewArrayOp res (arrPtrsRep dflags (fromInteger n)) mkMAP_DIRTY_infoLabel
+ [ (mkIntExpr dflags (fromInteger n),
+ fixedHdrSize dflags + oFFSET_StgMutArrPtrs_ptrs dflags)
+ , (mkIntExpr dflags (nonHdrSizeW (arrPtrsRep dflags (fromInteger n))),
+ fixedHdrSize dflags + oFFSET_StgMutArrPtrs_size dflags)
+ ]
+ (fromInteger n) init
+
+shouldInlinePrimOp _ CopyArrayOp
+ [src, src_off, dst, dst_off, (CmmLit (CmmInt n _))] =
+ Just $ \ [] -> doCopyArrayOp src src_off dst dst_off (fromInteger n)
+
+shouldInlinePrimOp _ CopyMutableArrayOp
+ [src, src_off, dst, dst_off, (CmmLit (CmmInt n _))] =
+ Just $ \ [] -> doCopyMutableArrayOp src src_off dst dst_off (fromInteger n)
+
+shouldInlinePrimOp _ CopyArrayArrayOp
+ [src, src_off, dst, dst_off, (CmmLit (CmmInt n _))] =
+ Just $ \ [] -> doCopyArrayOp src src_off dst dst_off (fromInteger n)
+
+shouldInlinePrimOp _ CopyMutableArrayArrayOp
+ [src, src_off, dst, dst_off, (CmmLit (CmmInt n _))] =
+ Just $ \ [] -> doCopyMutableArrayOp src src_off dst dst_off (fromInteger n)
+
+shouldInlinePrimOp dflags CloneArrayOp [src, src_off, (CmmLit (CmmInt n w))]
+ | wordsToBytes dflags (asUnsigned w n) <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] -> emitCloneArray mkMAP_FROZEN_infoLabel res src src_off (fromInteger n)
+
+shouldInlinePrimOp dflags CloneMutableArrayOp [src, src_off, (CmmLit (CmmInt n w))]
+ | wordsToBytes dflags (asUnsigned w n) <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] -> emitCloneArray mkMAP_DIRTY_infoLabel res src src_off (fromInteger n)
+
+shouldInlinePrimOp dflags FreezeArrayOp [src, src_off, (CmmLit (CmmInt n w))]
+ | wordsToBytes dflags (asUnsigned w n) <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] -> emitCloneArray mkMAP_FROZEN_infoLabel res src src_off (fromInteger n)
+
+shouldInlinePrimOp dflags ThawArrayOp [src, src_off, (CmmLit (CmmInt n w))]
+ | wordsToBytes dflags (asUnsigned w n) <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] -> emitCloneArray mkMAP_DIRTY_infoLabel res src src_off (fromInteger n)
+
+shouldInlinePrimOp dflags NewSmallArrayOp [(CmmLit (CmmInt n w)), init]
+ | wordsToBytes dflags (asUnsigned w n) <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] ->
+ doNewArrayOp res (smallArrPtrsRep (fromInteger n)) mkSMAP_DIRTY_infoLabel
+ [ (mkIntExpr dflags (fromInteger n),
+ fixedHdrSize dflags + oFFSET_StgSmallMutArrPtrs_ptrs dflags)
+ ]
+ (fromInteger n) init
+
+shouldInlinePrimOp _ CopySmallArrayOp
+ [src, src_off, dst, dst_off, (CmmLit (CmmInt n _))] =
+ Just $ \ [] -> doCopySmallArrayOp src src_off dst dst_off (fromInteger n)
+
+shouldInlinePrimOp _ CopySmallMutableArrayOp
+ [src, src_off, dst, dst_off, (CmmLit (CmmInt n _))] =
+ Just $ \ [] -> doCopySmallMutableArrayOp src src_off dst dst_off (fromInteger n)
+
+shouldInlinePrimOp dflags CloneSmallArrayOp [src, src_off, (CmmLit (CmmInt n w))]
+ | wordsToBytes dflags (asUnsigned w n) <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] -> emitCloneSmallArray mkSMAP_FROZEN_infoLabel res src src_off (fromInteger n)
+
+shouldInlinePrimOp dflags CloneSmallMutableArrayOp [src, src_off, (CmmLit (CmmInt n w))]
+ | wordsToBytes dflags (asUnsigned w n) <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] -> emitCloneSmallArray mkSMAP_DIRTY_infoLabel res src src_off (fromInteger n)
+
+shouldInlinePrimOp dflags FreezeSmallArrayOp [src, src_off, (CmmLit (CmmInt n w))]
+ | wordsToBytes dflags (asUnsigned w n) <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] -> emitCloneSmallArray mkSMAP_FROZEN_infoLabel res src src_off (fromInteger n)
+
+shouldInlinePrimOp dflags ThawSmallArrayOp [src, src_off, (CmmLit (CmmInt n w))]
+ | wordsToBytes dflags (asUnsigned w n) <= fromIntegral (maxInlineAllocSize dflags) =
+ Just $ \ [res] -> emitCloneSmallArray mkSMAP_DIRTY_infoLabel res src src_off (fromInteger n)
+
+shouldInlinePrimOp dflags primop args
+ | primOpOutOfLine primop = Nothing
+ | otherwise = Just $ \ regs -> emitPrimOp dflags regs primop args
+
+-- TODO: Several primops, such as 'copyArray#', only have an inline
+-- implementation (below) but could possibly have both an inline
+-- implementation and an out-of-line implementation, just like
+-- 'newArray#'. This would lower the amount of code generated,
+-- hopefully without a performance impact (needs to be measured).
+
---------------------------------------------------
cgPrimOp :: [LocalReg] -- where to put the results
-> PrimOp -- the op
@@ -141,63 +271,6 @@ emitPrimOp :: DynFlags
-- First we handle various awkward cases specially. The remaining
-- easy cases are then handled by translateOp, defined below.
-emitPrimOp dflags [res_r,res_c] IntAddCOp [aa,bb]
-{-
- With some bit-twiddling, we can define int{Add,Sub}Czh portably in
- C, and without needing any comparisons. This may not be the
- fastest way to do it - if you have better code, please send it! --SDM
-
- Return : r = a + b, c = 0 if no overflow, 1 on overflow.
-
- We currently don't make use of the r value if c is != 0 (i.e.
- overflow), we just convert to big integers and try again. This
- could be improved by making r and c the correct values for
- plugging into a new J#.
-
- { r = ((I_)(a)) + ((I_)(b)); \
- c = ((StgWord)(~(((I_)(a))^((I_)(b))) & (((I_)(a))^r))) \
- >> (BITS_IN (I_) - 1); \
- }
- Wading through the mass of bracketry, it seems to reduce to:
- c = ( (~(a^b)) & (a^r) ) >>unsigned (BITS_IN(I_)-1)
-
--}
- = emit $ catAGraphs [
- mkAssign (CmmLocal res_r) (CmmMachOp (mo_wordAdd dflags) [aa,bb]),
- mkAssign (CmmLocal res_c) $
- CmmMachOp (mo_wordUShr dflags) [
- CmmMachOp (mo_wordAnd dflags) [
- CmmMachOp (mo_wordNot dflags) [CmmMachOp (mo_wordXor dflags) [aa,bb]],
- CmmMachOp (mo_wordXor dflags) [aa, CmmReg (CmmLocal res_r)]
- ],
- mkIntExpr dflags (wORD_SIZE_IN_BITS dflags - 1)
- ]
- ]
-
-
-emitPrimOp dflags [res_r,res_c] IntSubCOp [aa,bb]
-{- Similarly:
- #define subIntCzh(r,c,a,b) \
- { r = ((I_)(a)) - ((I_)(b)); \
- c = ((StgWord)((((I_)(a))^((I_)(b))) & (((I_)(a))^r))) \
- >> (BITS_IN (I_) - 1); \
- }
-
- c = ((a^b) & (a^r)) >>unsigned (BITS_IN(I_)-1)
--}
- = emit $ catAGraphs [
- mkAssign (CmmLocal res_r) (CmmMachOp (mo_wordSub dflags) [aa,bb]),
- mkAssign (CmmLocal res_c) $
- CmmMachOp (mo_wordUShr dflags) [
- CmmMachOp (mo_wordAnd dflags) [
- CmmMachOp (mo_wordXor dflags) [aa,bb],
- CmmMachOp (mo_wordXor dflags) [aa, CmmReg (CmmLocal res_r)]
- ],
- mkIntExpr dflags (wORD_SIZE_IN_BITS dflags - 1)
- ]
- ]
-
-
emitPrimOp _ [res] ParOp [arg]
=
-- for now, just implement this in a C function
@@ -231,10 +304,10 @@ emitPrimOp _ [res] GetCurrentCCSOp [_dummy_arg]
= emitAssign (CmmLocal res) curCCS
emitPrimOp dflags [res] ReadMutVarOp [mutv]
- = emitAssign (CmmLocal res) (cmmLoadIndexW dflags mutv (fixedHdrSize dflags) (gcWord dflags))
+ = emitAssign (CmmLocal res) (cmmLoadIndexW dflags mutv (fixedHdrSizeW dflags) (gcWord dflags))
emitPrimOp dflags [] WriteMutVarOp [mutv,var]
- = do emitStore (cmmOffsetW dflags mutv (fixedHdrSize dflags)) var
+ = do emitStore (cmmOffsetW dflags mutv (fixedHdrSizeW dflags)) var
emitCCall
[{-no results-}]
(CmmLit (CmmLabel mkDirty_MUT_VAR_Label))
@@ -243,7 +316,7 @@ emitPrimOp dflags [] WriteMutVarOp [mutv,var]
-- #define sizzeofByteArrayzh(r,a) \
-- r = ((StgArrWords *)(a))->bytes
emitPrimOp dflags [res] SizeofByteArrayOp [arg]
- = emit $ mkAssign (CmmLocal res) (cmmLoadIndexW dflags arg (fixedHdrSize dflags) (bWord dflags))
+ = emit $ mkAssign (CmmLocal res) (cmmLoadIndexW dflags arg (fixedHdrSizeW dflags) (bWord dflags))
-- #define sizzeofMutableByteArrayzh(r,a) \
-- r = ((StgArrWords *)(a))->bytes
@@ -261,14 +334,14 @@ emitPrimOp dflags [res] ByteArrayContents_Char [arg]
-- #define stableNameToIntzh(r,s) (r = ((StgStableName *)s)->sn)
emitPrimOp dflags [res] StableNameToIntOp [arg]
- = emitAssign (CmmLocal res) (cmmLoadIndexW dflags arg (fixedHdrSize dflags) (bWord dflags))
+ = emitAssign (CmmLocal res) (cmmLoadIndexW dflags arg (fixedHdrSizeW dflags) (bWord dflags))
-- #define eqStableNamezh(r,sn1,sn2) \
-- (r = (((StgStableName *)sn1)->sn == ((StgStableName *)sn2)->sn))
emitPrimOp dflags [res] EqStableNameOp [arg1,arg2]
= emitAssign (CmmLocal res) (CmmMachOp (mo_wordEq dflags) [
- cmmLoadIndexW dflags arg1 (fixedHdrSize dflags) (bWord dflags),
- cmmLoadIndexW dflags arg2 (fixedHdrSize dflags) (bWord dflags)
+ cmmLoadIndexW dflags arg1 (fixedHdrSizeW dflags) (bWord dflags),
+ cmmLoadIndexW dflags arg2 (fixedHdrSizeW dflags) (bWord dflags)
])
@@ -296,37 +369,21 @@ emitPrimOp dflags [res] DataToTagOp [arg]
-- }
emitPrimOp _ [res] UnsafeFreezeArrayOp [arg]
= emit $ catAGraphs
- [ setInfo arg (CmmLit (CmmLabel mkMAP_FROZEN_infoLabel)),
+ [ setInfo arg (CmmLit (CmmLabel mkMAP_FROZEN0_infoLabel)),
mkAssign (CmmLocal res) arg ]
emitPrimOp _ [res] UnsafeFreezeArrayArrayOp [arg]
= emit $ catAGraphs
- [ setInfo arg (CmmLit (CmmLabel mkMAP_FROZEN_infoLabel)),
+ [ setInfo arg (CmmLit (CmmLabel mkMAP_FROZEN0_infoLabel)),
+ mkAssign (CmmLocal res) arg ]
+emitPrimOp _ [res] UnsafeFreezeSmallArrayOp [arg]
+ = emit $ catAGraphs
+ [ setInfo arg (CmmLit (CmmLabel mkSMAP_FROZEN0_infoLabel)),
mkAssign (CmmLocal res) arg ]
-- #define unsafeFreezzeByteArrayzh(r,a) r=(a)
emitPrimOp _ [res] UnsafeFreezeByteArrayOp [arg]
= emitAssign (CmmLocal res) arg
--- Copying pointer arrays
-
-emitPrimOp _ [] CopyArrayOp [src,src_off,dst,dst_off,n] =
- doCopyArrayOp src src_off dst dst_off n
-emitPrimOp _ [] CopyMutableArrayOp [src,src_off,dst,dst_off,n] =
- doCopyMutableArrayOp src src_off dst dst_off n
-emitPrimOp _ [res] CloneArrayOp [src,src_off,n] =
- emitCloneArray mkMAP_FROZEN_infoLabel res src src_off n
-emitPrimOp _ [res] CloneMutableArrayOp [src,src_off,n] =
- emitCloneArray mkMAP_DIRTY_infoLabel res src src_off n
-emitPrimOp _ [res] FreezeArrayOp [src,src_off,n] =
- emitCloneArray mkMAP_FROZEN_infoLabel res src src_off n
-emitPrimOp _ [res] ThawArrayOp [src,src_off,n] =
- emitCloneArray mkMAP_DIRTY_infoLabel res src src_off n
-
-emitPrimOp _ [] CopyArrayArrayOp [src,src_off,dst,dst_off,n] =
- doCopyArrayOp src src_off dst dst_off n
-emitPrimOp _ [] CopyMutableArrayArrayOp [src,src_off,dst,dst_off,n] =
- doCopyMutableArrayOp src src_off dst dst_off n
-
-- Reading/writing pointer arrays
emitPrimOp _ [res] ReadArrayOp [obj,ix] = doReadPtrArrayOp res obj ix
@@ -344,8 +401,14 @@ emitPrimOp _ [] WriteArrayArrayOp_MutableByteArray [obj,ix,v] = doWritePt
emitPrimOp _ [] WriteArrayArrayOp_ArrayArray [obj,ix,v] = doWritePtrArrayOp obj ix v
emitPrimOp _ [] WriteArrayArrayOp_MutableArrayArray [obj,ix,v] = doWritePtrArrayOp obj ix v
+emitPrimOp _ [res] ReadSmallArrayOp [obj,ix] = doReadSmallPtrArrayOp res obj ix
+emitPrimOp _ [res] IndexSmallArrayOp [obj,ix] = doReadSmallPtrArrayOp res obj ix
+emitPrimOp _ [] WriteSmallArrayOp [obj,ix,v] = doWriteSmallPtrArrayOp obj ix v
+
+-- Getting the size of pointer arrays
+
emitPrimOp dflags [res] SizeofArrayOp [arg]
- = emit $ mkAssign (CmmLocal res) (cmmLoadIndexW dflags arg (fixedHdrSize dflags + oFFSET_StgMutArrPtrs_ptrs dflags) (bWord dflags))
+ = emit $ mkAssign (CmmLocal res) (cmmLoadIndexW dflags arg (fixedHdrSizeW dflags + oFFSET_StgMutArrPtrs_ptrs dflags) (bWord dflags))
emitPrimOp dflags [res] SizeofMutableArrayOp [arg]
= emitPrimOp dflags [res] SizeofArrayOp [arg]
emitPrimOp dflags [res] SizeofArrayArrayOp [arg]
@@ -353,6 +416,13 @@ emitPrimOp dflags [res] SizeofArrayArrayOp [arg]
emitPrimOp dflags [res] SizeofMutableArrayArrayOp [arg]
= emitPrimOp dflags [res] SizeofArrayOp [arg]
+emitPrimOp dflags [res] SizeofSmallArrayOp [arg] =
+ emit $ mkAssign (CmmLocal res)
+ (cmmLoadIndexW dflags arg
+ (fixedHdrSizeW dflags + oFFSET_StgSmallMutArrPtrs_ptrs dflags) (bWord dflags))
+emitPrimOp dflags [res] SizeofSmallMutableArrayOp [arg] =
+ emitPrimOp dflags [res] SizeofSmallArrayOp [arg]
+
-- IndexXXXoffAddr
emitPrimOp dflags res IndexOffAddrOp_Char args = doIndexOffAddrOp (Just (mo_u_8ToWord dflags)) b8 res args
@@ -493,6 +563,20 @@ emitPrimOp _ [res] PopCnt32Op [w] = emitPopCntCall res w W32
emitPrimOp _ [res] PopCnt64Op [w] = emitPopCntCall res w W64
emitPrimOp dflags [res] PopCntOp [w] = emitPopCntCall res w (wordWidth dflags)
+-- count leading zeros
+emitPrimOp _ [res] Clz8Op [w] = emitClzCall res w W8
+emitPrimOp _ [res] Clz16Op [w] = emitClzCall res w W16
+emitPrimOp _ [res] Clz32Op [w] = emitClzCall res w W32
+emitPrimOp _ [res] Clz64Op [w] = emitClzCall res w W64
+emitPrimOp dflags [res] ClzOp [w] = emitClzCall res w (wordWidth dflags)
+
+-- count trailing zeros
+emitPrimOp _ [res] Ctz8Op [w] = emitCtzCall res w W8
+emitPrimOp _ [res] Ctz16Op [w] = emitCtzCall res w W16
+emitPrimOp _ [res] Ctz32Op [w] = emitCtzCall res w W32
+emitPrimOp _ [res] Ctz64Op [w] = emitCtzCall res w W64
+emitPrimOp dflags [res] CtzOp [w] = emitCtzCall res w (wordWidth dflags)
+
-- Unsigned int to floating point conversions
emitPrimOp _ [res] Word2FloatOp [w] = emitPrimCall [res]
(MO_UF_Conv W32) [w]
@@ -663,6 +747,25 @@ emitPrimOp _ res PrefetchByteArrayOp0 args = doPrefetchByteArrayOp 0 res
emitPrimOp _ res PrefetchMutableByteArrayOp0 args = doPrefetchByteArrayOp 0 res args
emitPrimOp _ res PrefetchAddrOp0 args = doPrefetchAddrOp 0 res args
+-- Atomic read-modify-write
+emitPrimOp dflags [res] FetchAddByteArrayOp_Int [mba, ix, n] =
+ doAtomicRMW res AMO_Add mba ix (bWord dflags) n
+emitPrimOp dflags [res] FetchSubByteArrayOp_Int [mba, ix, n] =
+ doAtomicRMW res AMO_Sub mba ix (bWord dflags) n
+emitPrimOp dflags [res] FetchAndByteArrayOp_Int [mba, ix, n] =
+ doAtomicRMW res AMO_And mba ix (bWord dflags) n
+emitPrimOp dflags [res] FetchNandByteArrayOp_Int [mba, ix, n] =
+ doAtomicRMW res AMO_Nand mba ix (bWord dflags) n
+emitPrimOp dflags [res] FetchOrByteArrayOp_Int [mba, ix, n] =
+ doAtomicRMW res AMO_Or mba ix (bWord dflags) n
+emitPrimOp dflags [res] FetchXorByteArrayOp_Int [mba, ix, n] =
+ doAtomicRMW res AMO_Xor mba ix (bWord dflags) n
+emitPrimOp dflags [res] AtomicReadByteArrayOp_Int [mba, ix] =
+ doAtomicReadByteArray res mba ix (bWord dflags)
+emitPrimOp dflags [] AtomicWriteByteArrayOp_Int [mba, ix, val] =
+ doAtomicWriteByteArray mba ix (bWord dflags) val
+emitPrimOp dflags [res] CasByteArrayOp_Int [mba, ix, old, new] =
+ doCasByteArray res mba ix (bWord dflags) old new
-- The rest just translate straightforwardly
emitPrimOp dflags [res] op [arg]
@@ -703,6 +806,10 @@ callishPrimOpSupported dflags op
WordAdd2Op | ncg && x86ish -> Left (MO_Add2 (wordWidth dflags))
| otherwise -> Right genericWordAdd2Op
+ IntAddCOp -> Right genericIntAddCOp
+
+ IntSubCOp -> Right genericIntSubCOp
+
WordMul2Op | ncg && x86ish -> Left (MO_U_Mul2 (wordWidth dflags))
| otherwise -> Right genericWordMul2Op
@@ -808,6 +915,67 @@ genericWordAdd2Op [res_h, res_l] [arg_x, arg_y]
(bottomHalf (CmmReg (CmmLocal r1))))]
genericWordAdd2Op _ _ = panic "genericWordAdd2Op"
+genericIntAddCOp :: GenericOp
+genericIntAddCOp [res_r, res_c] [aa, bb]
+{-
+ With some bit-twiddling, we can define int{Add,Sub}Czh portably in
+ C, and without needing any comparisons. This may not be the
+ fastest way to do it - if you have better code, please send it! --SDM
+
+ Return : r = a + b, c = 0 if no overflow, 1 on overflow.
+
+ We currently don't make use of the r value if c is != 0 (i.e.
+ overflow), we just convert to big integers and try again. This
+ could be improved by making r and c the correct values for
+ plugging into a new J#.
+
+ { r = ((I_)(a)) + ((I_)(b)); \
+ c = ((StgWord)(~(((I_)(a))^((I_)(b))) & (((I_)(a))^r))) \
+ >> (BITS_IN (I_) - 1); \
+ }
+ Wading through the mass of bracketry, it seems to reduce to:
+ c = ( (~(a^b)) & (a^r) ) >>unsigned (BITS_IN(I_)-1)
+
+-}
+ = do dflags <- getDynFlags
+ emit $ catAGraphs [
+ mkAssign (CmmLocal res_r) (CmmMachOp (mo_wordAdd dflags) [aa,bb]),
+ mkAssign (CmmLocal res_c) $
+ CmmMachOp (mo_wordUShr dflags) [
+ CmmMachOp (mo_wordAnd dflags) [
+ CmmMachOp (mo_wordNot dflags) [CmmMachOp (mo_wordXor dflags) [aa,bb]],
+ CmmMachOp (mo_wordXor dflags) [aa, CmmReg (CmmLocal res_r)]
+ ],
+ mkIntExpr dflags (wORD_SIZE_IN_BITS dflags - 1)
+ ]
+ ]
+genericIntAddCOp _ _ = panic "genericIntAddCOp"
+
+genericIntSubCOp :: GenericOp
+genericIntSubCOp [res_r, res_c] [aa, bb]
+{- Similarly:
+ #define subIntCzh(r,c,a,b) \
+ { r = ((I_)(a)) - ((I_)(b)); \
+ c = ((StgWord)((((I_)(a))^((I_)(b))) & (((I_)(a))^r))) \
+ >> (BITS_IN (I_) - 1); \
+ }
+
+ c = ((a^b) & (a^r)) >>unsigned (BITS_IN(I_)-1)
+-}
+ = do dflags <- getDynFlags
+ emit $ catAGraphs [
+ mkAssign (CmmLocal res_r) (CmmMachOp (mo_wordSub dflags) [aa,bb]),
+ mkAssign (CmmLocal res_c) $
+ CmmMachOp (mo_wordUShr dflags) [
+ CmmMachOp (mo_wordAnd dflags) [
+ CmmMachOp (mo_wordXor dflags) [aa,bb],
+ CmmMachOp (mo_wordXor dflags) [aa, CmmReg (CmmLocal res_r)]
+ ],
+ mkIntExpr dflags (wORD_SIZE_IN_BITS dflags - 1)
+ ]
+ ]
+genericIntSubCOp _ _ = panic "genericIntSubCOp"
+
genericWordMul2Op :: GenericOp
genericWordMul2Op [res_h, res_l] [arg_x, arg_y]
= do dflags <- getDynFlags
@@ -1013,6 +1181,7 @@ translateOp dflags SameMVarOp = Just (mo_wordEq dflags)
translateOp dflags SameMutableArrayOp = Just (mo_wordEq dflags)
translateOp dflags SameMutableByteArrayOp = Just (mo_wordEq dflags)
translateOp dflags SameMutableArrayArrayOp= Just (mo_wordEq dflags)
+translateOp dflags SameSmallMutableArrayOp= Just (mo_wordEq dflags)
translateOp dflags SameTVarOp = Just (mo_wordEq dflags)
translateOp dflags EqStablePtrOp = Just (mo_wordEq dflags)
@@ -1149,7 +1318,7 @@ doWritePtrArrayOp addr idx val
loadArrPtrsSize :: DynFlags -> CmmExpr -> CmmExpr
loadArrPtrsSize dflags addr = CmmLoad (cmmOffsetB dflags addr off) (bWord dflags)
- where off = fixedHdrSize dflags * wORD_SIZE dflags + oFFSET_StgMutArrPtrs_ptrs dflags
+ where off = fixedHdrSize dflags + oFFSET_StgMutArrPtrs_ptrs dflags
mkBasicIndexedRead :: ByteOff -- Initial offset in bytes
-> Maybe MachOp -- Optional result cast
@@ -1407,6 +1576,32 @@ mkBasicPrefetch locality off res base idx
[reg] -> emitAssign (CmmLocal reg) base
_ -> panic "StgCmmPrim: mkBasicPrefetch"
+-- ----------------------------------------------------------------------------
+-- Allocating byte arrays
+
+-- | Takes a register to return the newly allocated array in and the
+-- size of the new array in bytes. Allocates a new
+-- 'MutableByteArray#'.
+doNewByteArrayOp :: CmmFormal -> ByteOff -> FCode ()
+doNewByteArrayOp res_r n = do
+ dflags <- getDynFlags
+
+ let info_ptr = mkLblExpr mkArrWords_infoLabel
+ rep = arrWordsRep dflags n
+
+ tickyAllocPrim (mkIntExpr dflags (arrWordsHdrSize dflags))
+ (mkIntExpr dflags (nonHdrSize dflags rep))
+ (zeroExpr dflags)
+
+ let hdr_size = fixedHdrSize dflags
+
+ base <- allocHeapClosure rep info_ptr curCCS
+ [ (mkIntExpr dflags n,
+ hdr_size + oFFSET_StgArrWords_bytes dflags)
+ ]
+
+ emit $ mkAssign (CmmLocal res_r) base
+
-- ----------------------------------------------------------------------------
-- Copying byte arrays
@@ -1495,6 +1690,47 @@ doSetByteArrayOp ba off len c
p <- assignTempE $ cmmOffsetExpr dflags (cmmOffsetB dflags ba (arrWordsHdrSize dflags)) off
emitMemsetCall p c len (mkIntExpr dflags 1)
+-- ----------------------------------------------------------------------------
+-- Allocating arrays
+
+-- | Allocate a new array.
+doNewArrayOp :: CmmFormal -- ^ return register
+ -> SMRep -- ^ representation of the array
+ -> CLabel -- ^ info pointer
+ -> [(CmmExpr, ByteOff)] -- ^ header payload
+ -> WordOff -- ^ array size
+ -> CmmExpr -- ^ initial element
+ -> FCode ()
+doNewArrayOp res_r rep info payload n init = do
+ dflags <- getDynFlags
+
+ let info_ptr = mkLblExpr info
+
+ tickyAllocPrim (mkIntExpr dflags (hdrSize dflags rep))
+ (mkIntExpr dflags (nonHdrSize dflags rep))
+ (zeroExpr dflags)
+
+ base <- allocHeapClosure rep info_ptr curCCS payload
+
+ arr <- CmmLocal `fmap` newTemp (bWord dflags)
+ emit $ mkAssign arr base
+
+ -- Initialise all elements of the the array
+ p <- assignTemp $ cmmOffsetB dflags (CmmReg arr) (hdrSize dflags rep)
+ for <- newLabelC
+ emitLabel for
+ let loopBody =
+ [ mkStore (CmmReg (CmmLocal p)) init
+ , mkAssign (CmmLocal p) (cmmOffsetW dflags (CmmReg (CmmLocal p)) 1)
+ , mkBranch for ]
+ emit =<< mkCmmIfThen
+ (cmmULtWord dflags (CmmReg (CmmLocal p))
+ (cmmOffsetW dflags (CmmReg arr)
+ (hdrSizeW dflags rep + n)))
+ (catAGraphs loopBody)
+
+ emit $ mkAssign (CmmLocal res_r) (CmmReg arr)
+
-- ----------------------------------------------------------------------------
-- Copying pointer arrays
@@ -1516,7 +1752,7 @@ assignTempE e = do
-- destination 'MutableArray#', an offset into the destination array,
-- and the number of elements to copy. Copies the given number of
-- elements from the source array to the destination array.
-doCopyArrayOp :: CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr
+doCopyArrayOp :: CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> WordOff
-> FCode ()
doCopyArrayOp = emitCopyArray copy
where
@@ -1524,14 +1760,15 @@ doCopyArrayOp = emitCopyArray copy
-- they're of different types)
copy _src _dst dst_p src_p bytes =
do dflags <- getDynFlags
- emitMemcpyCall dst_p src_p bytes (mkIntExpr dflags (wORD_SIZE dflags))
+ emitMemcpyCall dst_p src_p (mkIntExpr dflags bytes)
+ (mkIntExpr dflags (wORD_SIZE dflags))
-- | Takes a source 'MutableArray#', an offset in the source array, a
-- destination 'MutableArray#', an offset into the destination array,
-- and the number of elements to copy. Copies the given number of
-- elements from the source array to the destination array.
-doCopyMutableArrayOp :: CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr
+doCopyMutableArrayOp :: CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> WordOff
-> FCode ()
doCopyMutableArrayOp = emitCopyArray copy
where
@@ -1541,114 +1778,299 @@ doCopyMutableArrayOp = emitCopyArray copy
copy src dst dst_p src_p bytes = do
dflags <- getDynFlags
[moveCall, cpyCall] <- forkAlts [
- getCode $ emitMemmoveCall dst_p src_p bytes (mkIntExpr dflags (wORD_SIZE dflags)),
- getCode $ emitMemcpyCall dst_p src_p bytes (mkIntExpr dflags (wORD_SIZE dflags))
+ getCode $ emitMemmoveCall dst_p src_p (mkIntExpr dflags bytes)
+ (mkIntExpr dflags (wORD_SIZE dflags)),
+ getCode $ emitMemcpyCall dst_p src_p (mkIntExpr dflags bytes)
+ (mkIntExpr dflags (wORD_SIZE dflags))
]
emit =<< mkCmmIfThenElse (cmmEqWord dflags src dst) moveCall cpyCall
-emitCopyArray :: (CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr
- -> FCode ())
- -> CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr
+emitCopyArray :: (CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> ByteOff
+ -> FCode ()) -- ^ copy function
+ -> CmmExpr -- ^ source array
+ -> CmmExpr -- ^ offset in source array
+ -> CmmExpr -- ^ destination array
+ -> CmmExpr -- ^ offset in destination array
+ -> WordOff -- ^ number of elements to copy
-> FCode ()
-emitCopyArray copy src0 src_off0 dst0 dst_off0 n0 = do
+emitCopyArray copy src0 src_off dst0 dst_off0 n = do
dflags <- getDynFlags
- n <- assignTempE n0
- nonzero <- getCode $ do
+ when (n /= 0) $ do
-- Passed as arguments (be careful)
src <- assignTempE src0
- src_off <- assignTempE src_off0
dst <- assignTempE dst0
dst_off <- assignTempE dst_off0
-- Set the dirty bit in the header.
emit (setInfo dst (CmmLit (CmmLabel mkMAP_DIRTY_infoLabel)))
- dst_elems_p <- assignTempE $ cmmOffsetB dflags dst (arrPtrsHdrSize dflags)
+ dst_elems_p <- assignTempE $ cmmOffsetB dflags dst
+ (arrPtrsHdrSize dflags)
dst_p <- assignTempE $ cmmOffsetExprW dflags dst_elems_p dst_off
- src_p <- assignTempE $ cmmOffsetExprW dflags (cmmOffsetB dflags src (arrPtrsHdrSize dflags)) src_off
- bytes <- assignTempE $ cmmMulWord dflags n (mkIntExpr dflags (wORD_SIZE dflags))
+ src_p <- assignTempE $ cmmOffsetExprW dflags
+ (cmmOffsetB dflags src (arrPtrsHdrSize dflags)) src_off
+ let bytes = wordsToBytes dflags n
copy src dst dst_p src_p bytes
-- The base address of the destination card table
- dst_cards_p <- assignTempE $ cmmOffsetExprW dflags dst_elems_p (loadArrPtrsSize dflags dst)
+ dst_cards_p <- assignTempE $ cmmOffsetExprW dflags dst_elems_p
+ (loadArrPtrsSize dflags dst)
emitSetCards dst_off dst_cards_p n
- emit =<< mkCmmIfThen (cmmNeWord dflags n (mkIntExpr dflags 0)) nonzero
+doCopySmallArrayOp :: CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> WordOff
+ -> FCode ()
+doCopySmallArrayOp = emitCopySmallArray copy
+ where
+ -- Copy data (we assume the arrays aren't overlapping since
+ -- they're of different types)
+ copy _src _dst dst_p src_p bytes =
+ do dflags <- getDynFlags
+ emitMemcpyCall dst_p src_p (mkIntExpr dflags bytes)
+ (mkIntExpr dflags (wORD_SIZE dflags))
+
+
+doCopySmallMutableArrayOp :: CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> WordOff
+ -> FCode ()
+doCopySmallMutableArrayOp = emitCopySmallArray copy
+ where
+ -- The only time the memory might overlap is when the two arrays
+ -- we were provided are the same array!
+ -- TODO: Optimize branch for common case of no aliasing.
+ copy src dst dst_p src_p bytes = do
+ dflags <- getDynFlags
+ [moveCall, cpyCall] <- forkAlts
+ [ getCode $ emitMemmoveCall dst_p src_p (mkIntExpr dflags bytes)
+ (mkIntExpr dflags (wORD_SIZE dflags))
+ , getCode $ emitMemcpyCall dst_p src_p (mkIntExpr dflags bytes)
+ (mkIntExpr dflags (wORD_SIZE dflags))
+ ]
+ emit =<< mkCmmIfThenElse (cmmEqWord dflags src dst) moveCall cpyCall
+
+emitCopySmallArray :: (CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> ByteOff
+ -> FCode ()) -- ^ copy function
+ -> CmmExpr -- ^ source array
+ -> CmmExpr -- ^ offset in source array
+ -> CmmExpr -- ^ destination array
+ -> CmmExpr -- ^ offset in destination array
+ -> WordOff -- ^ number of elements to copy
+ -> FCode ()
+emitCopySmallArray copy src0 src_off dst0 dst_off n = do
+ dflags <- getDynFlags
+
+ -- Passed as arguments (be careful)
+ src <- assignTempE src0
+ dst <- assignTempE dst0
+
+ -- Set the dirty bit in the header.
+ emit (setInfo dst (CmmLit (CmmLabel mkSMAP_DIRTY_infoLabel)))
+
+ dst_p <- assignTempE $ cmmOffsetExprW dflags
+ (cmmOffsetB dflags dst (smallArrPtrsHdrSize dflags)) dst_off
+ src_p <- assignTempE $ cmmOffsetExprW dflags
+ (cmmOffsetB dflags src (smallArrPtrsHdrSize dflags)) src_off
+ let bytes = wordsToBytes dflags n
+
+ copy src dst dst_p src_p bytes
-- | Takes an info table label, a register to return the newly
-- allocated array in, a source array, an offset in the source array,
--- and the number of elements to copy. Allocates a new array and
--- initializes it form the source array.
-emitCloneArray :: CLabel -> CmmFormal -> CmmExpr -> CmmExpr -> CmmExpr
+-- and the number of elements to copy. Allocates a new array and
+-- initializes it from the source array.
+emitCloneArray :: CLabel -> CmmFormal -> CmmExpr -> CmmExpr -> WordOff
-> FCode ()
-emitCloneArray info_p res_r src0 src_off0 n0 = do
+emitCloneArray info_p res_r src src_off n = do
dflags <- getDynFlags
- let arrPtrsHdrSizeW dflags = mkIntExpr dflags (fixedHdrSize dflags +
- (sIZEOF_StgMutArrPtrs_NoHdr dflags `div` wORD_SIZE dflags))
- myCapability = cmmSubWord dflags (CmmReg baseReg) (mkIntExpr dflags (oFFSET_Capability_r dflags))
- -- Passed as arguments (be careful)
- src <- assignTempE src0
- src_off <- assignTempE src_off0
- n <- assignTempE n0
- card_bytes <- assignTempE $ cardRoundUp dflags n
- size <- assignTempE $ cmmAddWord dflags n (bytesToWordsRoundUp dflags card_bytes)
- words <- assignTempE $ cmmAddWord dflags (arrPtrsHdrSizeW dflags) size
+ let info_ptr = mkLblExpr info_p
+ rep = arrPtrsRep dflags n
- arr_r <- newTemp (bWord dflags)
- emitAllocateCall arr_r myCapability words
- tickyAllocPrim (mkIntExpr dflags (arrPtrsHdrSize dflags)) (cmmMulWord dflags n (wordSize dflags))
- (zeroExpr dflags)
+ tickyAllocPrim (mkIntExpr dflags (arrPtrsHdrSize dflags))
+ (mkIntExpr dflags (nonHdrSize dflags rep))
+ (zeroExpr dflags)
- let arr = CmmReg (CmmLocal arr_r)
- emitSetDynHdr arr (CmmLit (CmmLabel info_p)) curCCS
- emit $ mkStore (cmmOffsetB dflags arr (fixedHdrSize dflags * wORD_SIZE dflags +
- oFFSET_StgMutArrPtrs_ptrs dflags)) n
- emit $ mkStore (cmmOffsetB dflags arr (fixedHdrSize dflags * wORD_SIZE dflags +
- oFFSET_StgMutArrPtrs_size dflags)) size
+ let hdr_size = fixedHdrSize dflags
- dst_p <- assignTempE $ cmmOffsetB dflags arr (arrPtrsHdrSize dflags)
- src_p <- assignTempE $ cmmOffsetExprW dflags (cmmOffsetB dflags src (arrPtrsHdrSize dflags))
- src_off
+ base <- allocHeapClosure rep info_ptr curCCS
+ [ (mkIntExpr dflags n,
+ hdr_size + oFFSET_StgMutArrPtrs_ptrs dflags)
+ , (mkIntExpr dflags (nonHdrSizeW rep),
+ hdr_size + oFFSET_StgMutArrPtrs_size dflags)
+ ]
- emitMemcpyCall dst_p src_p (cmmMulWord dflags n (wordSize dflags)) (mkIntExpr dflags (wORD_SIZE dflags))
+ arr <- CmmLocal `fmap` newTemp (bWord dflags)
+ emit $ mkAssign arr base
- emitMemsetCall (cmmOffsetExprW dflags dst_p n)
- (mkIntExpr dflags 1)
- card_bytes
+ dst_p <- assignTempE $ cmmOffsetB dflags (CmmReg arr)
+ (arrPtrsHdrSize dflags)
+ src_p <- assignTempE $ cmmOffsetExprW dflags src
+ (cmmAddWord dflags
+ (mkIntExpr dflags (arrPtrsHdrSizeW dflags)) src_off)
+
+ emitMemcpyCall dst_p src_p (mkIntExpr dflags (wordsToBytes dflags n))
+ (mkIntExpr dflags (wORD_SIZE dflags))
+
+ emit $ mkAssign (CmmLocal res_r) (CmmReg arr)
+
+-- | Takes an info table label, a register to return the newly
+-- allocated array in, a source array, an offset in the source array,
+-- and the number of elements to copy. Allocates a new array and
+-- initializes it from the source array.
+emitCloneSmallArray :: CLabel -> CmmFormal -> CmmExpr -> CmmExpr -> WordOff
+ -> FCode ()
+emitCloneSmallArray info_p res_r src src_off n = do
+ dflags <- getDynFlags
+
+ let info_ptr = mkLblExpr info_p
+ rep = smallArrPtrsRep n
+
+ tickyAllocPrim (mkIntExpr dflags (smallArrPtrsHdrSize dflags))
+ (mkIntExpr dflags (nonHdrSize dflags rep))
+ (zeroExpr dflags)
+
+ let hdr_size = fixedHdrSize dflags
+
+ base <- allocHeapClosure rep info_ptr curCCS
+ [ (mkIntExpr dflags n,
+ hdr_size + oFFSET_StgSmallMutArrPtrs_ptrs dflags)
+ ]
+
+ arr <- CmmLocal `fmap` newTemp (bWord dflags)
+ emit $ mkAssign arr base
+
+ dst_p <- assignTempE $ cmmOffsetB dflags (CmmReg arr)
+ (smallArrPtrsHdrSize dflags)
+ src_p <- assignTempE $ cmmOffsetExprW dflags src
+ (cmmAddWord dflags
+ (mkIntExpr dflags (smallArrPtrsHdrSizeW dflags)) src_off)
+
+ emitMemcpyCall dst_p src_p (mkIntExpr dflags (wordsToBytes dflags n))
(mkIntExpr dflags (wORD_SIZE dflags))
- emit $ mkAssign (CmmLocal res_r) arr
+
+ emit $ mkAssign (CmmLocal res_r) (CmmReg arr)
-- | Takes and offset in the destination array, the base address of
-- the card table, and the number of elements affected (*not* the
-- number of cards). The number of elements may not be zero.
-- Marks the relevant cards as dirty.
-emitSetCards :: CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
+emitSetCards :: CmmExpr -> CmmExpr -> WordOff -> FCode ()
emitSetCards dst_start dst_cards_start n = do
dflags <- getDynFlags
- start_card <- assignTempE $ card dflags dst_start
- let end_card = card dflags (cmmSubWord dflags (cmmAddWord dflags dst_start n) (mkIntExpr dflags 1))
+ start_card <- assignTempE $ cardCmm dflags dst_start
+ let end_card = cardCmm dflags
+ (cmmSubWord dflags
+ (cmmAddWord dflags dst_start (mkIntExpr dflags n))
+ (mkIntExpr dflags 1))
emitMemsetCall (cmmAddWord dflags dst_cards_start start_card)
(mkIntExpr dflags 1)
(cmmAddWord dflags (cmmSubWord dflags end_card start_card) (mkIntExpr dflags 1))
(mkIntExpr dflags 1) -- no alignment (1 byte)
-- Convert an element index to a card index
-card :: DynFlags -> CmmExpr -> CmmExpr
-card dflags i = cmmUShrWord dflags i (mkIntExpr dflags (mUT_ARR_PTRS_CARD_BITS dflags))
+cardCmm :: DynFlags -> CmmExpr -> CmmExpr
+cardCmm dflags i =
+ cmmUShrWord dflags i (mkIntExpr dflags (mUT_ARR_PTRS_CARD_BITS dflags))
--- Convert a number of elements to a number of cards, rounding up
-cardRoundUp :: DynFlags -> CmmExpr -> CmmExpr
-cardRoundUp dflags i = card dflags (cmmAddWord dflags i (mkIntExpr dflags ((1 `shiftL` mUT_ARR_PTRS_CARD_BITS dflags) - 1)))
+------------------------------------------------------------------------------
+-- SmallArray PrimOp implementations
+
+doReadSmallPtrArrayOp :: LocalReg
+ -> CmmExpr
+ -> CmmExpr
+ -> FCode ()
+doReadSmallPtrArrayOp res addr idx = do
+ dflags <- getDynFlags
+ mkBasicIndexedRead (smallArrPtrsHdrSize dflags) Nothing (gcWord dflags) res addr
+ (gcWord dflags) idx
+
+doWriteSmallPtrArrayOp :: CmmExpr
+ -> CmmExpr
+ -> CmmExpr
+ -> FCode ()
+doWriteSmallPtrArrayOp addr idx val = do
+ dflags <- getDynFlags
+ let ty = cmmExprType dflags val
+ mkBasicIndexedWrite (smallArrPtrsHdrSize dflags) Nothing addr ty idx val
+ emit (setInfo addr (CmmLit (CmmLabel mkSMAP_DIRTY_infoLabel)))
-bytesToWordsRoundUp :: DynFlags -> CmmExpr -> CmmExpr
-bytesToWordsRoundUp dflags e = cmmQuotWord dflags (cmmAddWord dflags e (mkIntExpr dflags (wORD_SIZE dflags - 1)))
- (wordSize dflags)
+------------------------------------------------------------------------------
+-- Atomic read-modify-write
+
+-- | Emit an atomic modification to a byte array element. The result
+-- reg contains that previous value of the element. Implies a full
+-- memory barrier.
+doAtomicRMW :: LocalReg -- ^ Result reg
+ -> AtomicMachOp -- ^ Atomic op (e.g. add)
+ -> CmmExpr -- ^ MutableByteArray#
+ -> CmmExpr -- ^ Index
+ -> CmmType -- ^ Type of element by which we are indexing
+ -> CmmExpr -- ^ Op argument (e.g. amount to add)
+ -> FCode ()
+doAtomicRMW res amop mba idx idx_ty n = do
+ dflags <- getDynFlags
+ let width = typeWidth idx_ty
+ addr = cmmIndexOffExpr dflags (arrWordsHdrSize dflags)
+ width mba idx
+ emitPrimCall
+ [ res ]
+ (MO_AtomicRMW width amop)
+ [ addr, n ]
+
+-- | Emit an atomic read to a byte array that acts as a memory barrier.
+doAtomicReadByteArray
+ :: LocalReg -- ^ Result reg
+ -> CmmExpr -- ^ MutableByteArray#
+ -> CmmExpr -- ^ Index
+ -> CmmType -- ^ Type of element by which we are indexing
+ -> FCode ()
+doAtomicReadByteArray res mba idx idx_ty = do
+ dflags <- getDynFlags
+ let width = typeWidth idx_ty
+ addr = cmmIndexOffExpr dflags (arrWordsHdrSize dflags)
+ width mba idx
+ emitPrimCall
+ [ res ]
+ (MO_AtomicRead width)
+ [ addr ]
+
+-- | Emit an atomic write to a byte array that acts as a memory barrier.
+doAtomicWriteByteArray
+ :: CmmExpr -- ^ MutableByteArray#
+ -> CmmExpr -- ^ Index
+ -> CmmType -- ^ Type of element by which we are indexing
+ -> CmmExpr -- ^ Value to write
+ -> FCode ()
+doAtomicWriteByteArray mba idx idx_ty val = do
+ dflags <- getDynFlags
+ let width = typeWidth idx_ty
+ addr = cmmIndexOffExpr dflags (arrWordsHdrSize dflags)
+ width mba idx
+ emitPrimCall
+ [ {- no results -} ]
+ (MO_AtomicWrite width)
+ [ addr, val ]
+
+doCasByteArray
+ :: LocalReg -- ^ Result reg
+ -> CmmExpr -- ^ MutableByteArray#
+ -> CmmExpr -- ^ Index
+ -> CmmType -- ^ Type of element by which we are indexing
+ -> CmmExpr -- ^ Old value
+ -> CmmExpr -- ^ New value
+ -> FCode ()
+doCasByteArray res mba idx idx_ty old new = do
+ dflags <- getDynFlags
+ let width = (typeWidth idx_ty)
+ addr = cmmIndexOffExpr dflags (arrWordsHdrSize dflags)
+ width mba idx
+ emitPrimCall
+ [ res ]
+ (MO_Cmpxchg width)
+ [ addr, old, new ]
-wordSize :: DynFlags -> CmmExpr
-wordSize dflags = mkIntExpr dflags (wORD_SIZE dflags)
+------------------------------------------------------------------------------
+-- Helpers for emitting function calls
-- | Emit a call to @memcpy@.
emitMemcpyCall :: CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
@@ -1675,19 +2097,6 @@ emitMemsetCall dst c n align = do
MO_Memset
[ dst, c, n, align ]
--- | Emit a call to @allocate@.
-emitAllocateCall :: LocalReg -> CmmExpr -> CmmExpr -> FCode ()
-emitAllocateCall res cap n = do
- emitCCall
- [ (res, AddrHint) ]
- allocate
- [ (cap, AddrHint)
- , (n, NoHint)
- ]
- where
- allocate = CmmLit (CmmLabel (mkForeignLabel (fsLit "allocate") Nothing
- ForeignLabelInExternalPackage IsFunction))
-
emitBSwapCall :: LocalReg -> CmmExpr -> Width -> FCode ()
emitBSwapCall res x width = do
emitPrimCall
@@ -1701,3 +2110,17 @@ emitPopCntCall res x width = do
[ res ]
(MO_PopCnt width)
[ x ]
+
+emitClzCall :: LocalReg -> CmmExpr -> Width -> FCode ()
+emitClzCall res x width = do
+ emitPrimCall
+ [ res ]
+ (MO_Clz width)
+ [ x ]
+
+emitCtzCall :: LocalReg -> CmmExpr -> Width -> FCode ()
+emitCtzCall res x width = do
+ emitPrimCall
+ [ res ]
+ (MO_Ctz width)
+ [ x ]
diff --git a/compiler/codeGen/StgCmmProf.hs b/compiler/codeGen/StgCmmProf.hs
index e8a2a10fddf8..7249477c9f69 100644
--- a/compiler/codeGen/StgCmmProf.hs
+++ b/compiler/codeGen/StgCmmProf.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Code generation for profiling
@@ -149,7 +151,7 @@ profDynAlloc :: SMRep -> CmmExpr -> FCode ()
profDynAlloc rep ccs
= ifProfiling $
do dflags <- getDynFlags
- profAlloc (mkIntExpr dflags (heapClosureSize dflags rep)) ccs
+ profAlloc (mkIntExpr dflags (heapClosureSizeW dflags rep)) ccs
-- | Record the allocation of a closure (size is given by a CmmExpr)
-- The size must be in words, because the allocation counter in a CCS counts
@@ -181,7 +183,7 @@ enterCostCentreFun ccs closure =
ifProfiling $ do
if isCurrentCCS ccs
then do dflags <- getDynFlags
- emitRtsCall rtsPackageId (fsLit "enterFunCCS")
+ emitRtsCall rtsPackageKey (fsLit "enterFunCCS")
[(CmmReg (CmmGlobal BaseReg), AddrHint),
(costCentreFrom dflags closure, AddrHint)] False
else return () -- top-level function, nothing to do
@@ -283,7 +285,7 @@ emitSetCCC cc tick push
pushCostCentre :: LocalReg -> CmmExpr -> CostCentre -> FCode ()
pushCostCentre result ccs cc
= emitRtsCallWithResult result AddrHint
- rtsPackageId
+ rtsPackageKey
(fsLit "pushCostCentre") [(ccs,AddrHint),
(CmmLit (mkCCostCentre cc), AddrHint)]
False
@@ -354,7 +356,7 @@ ldvEnter cl_ptr = do
loadEra :: DynFlags -> CmmExpr
loadEra dflags = CmmMachOp (MO_UU_Conv (cIntWidth dflags) (wordWidth dflags))
- [CmmLoad (mkLblExpr (mkCmmDataLabel rtsPackageId (fsLit "era")))
+ [CmmLoad (mkLblExpr (mkCmmDataLabel rtsPackageKey (fsLit "era")))
(cInt dflags)]
ldvWord :: DynFlags -> CmmExpr -> CmmExpr
diff --git a/compiler/codeGen/StgCmmTicky.hs b/compiler/codeGen/StgCmmTicky.hs
index 3f3c3c5a19be..3652a799798b 100644
--- a/compiler/codeGen/StgCmmTicky.hs
+++ b/compiler/codeGen/StgCmmTicky.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE BangPatterns, CPP #-}
+
-----------------------------------------------------------------------------
--
-- Code generation for ticky-ticky profiling
@@ -325,7 +327,7 @@ registerTickyCtr ctr_lbl = do
, mkStore (CmmLit (cmmLabelOffB ctr_lbl
(oFFSET_StgEntCounter_registeredp dflags)))
(mkIntExpr dflags 1) ]
- ticky_entry_ctrs = mkLblExpr (mkCmmDataLabel rtsPackageId (fsLit "ticky_entry_ctrs"))
+ ticky_entry_ctrs = mkLblExpr (mkCmmDataLabel rtsPackageKey (fsLit "ticky_entry_ctrs"))
emit =<< mkCmmIfThen test (catAGraphs register_stmts)
tickyReturnOldCon, tickyReturnNewCon :: RepArity -> FCode ()
@@ -415,7 +417,7 @@ tickyDynAlloc :: Maybe Id -> SMRep -> LambdaFormInfo -> FCode ()
--
-- TODO what else to count while we're here?
tickyDynAlloc mb_id rep lf = ifTicky $ getDynFlags >>= \dflags ->
- let bytes = wORD_SIZE dflags * heapClosureSize dflags rep
+ let bytes = wORD_SIZE dflags * heapClosureSizeW dflags rep
countGlobal tot ctr = do
bumpTickyCounterBy tot bytes
@@ -470,12 +472,12 @@ tickyAllocHeap genuine hp
bytes,
-- Bump the global allocation total ALLOC_HEAP_tot
addToMemLbl (cLong dflags)
- (mkCmmDataLabel rtsPackageId (fsLit "ALLOC_HEAP_tot"))
+ (mkCmmDataLabel rtsPackageKey (fsLit "ALLOC_HEAP_tot"))
bytes,
-- Bump the global allocation counter ALLOC_HEAP_ctr
if not genuine then mkNop
else addToMemLbl (cLong dflags)
- (mkCmmDataLabel rtsPackageId (fsLit "ALLOC_HEAP_ctr"))
+ (mkCmmDataLabel rtsPackageKey (fsLit "ALLOC_HEAP_ctr"))
1
]}
@@ -485,7 +487,9 @@ tickyAllocHeap genuine hp
-- the units are bytes
-tickyAllocPrim :: CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
+tickyAllocPrim :: CmmExpr -- ^ size of the full header, in bytes
+ -> CmmExpr -- ^ size of the payload, in bytes
+ -> CmmExpr -> FCode ()
tickyAllocPrim _hdr _goods _slop = ifTicky $ do
bumpTickyCounter (fsLit "ALLOC_PRIM_ctr")
bumpTickyCounterByE (fsLit "ALLOC_PRIM_adm") _hdr
@@ -537,13 +541,13 @@ ifTickyDynThunk :: FCode () -> FCode ()
ifTickyDynThunk code = tickyDynThunkIsOn >>= \b -> when b code
bumpTickyCounter :: FastString -> FCode ()
-bumpTickyCounter lbl = bumpTickyLbl (mkCmmDataLabel rtsPackageId lbl)
+bumpTickyCounter lbl = bumpTickyLbl (mkCmmDataLabel rtsPackageKey lbl)
bumpTickyCounterBy :: FastString -> Int -> FCode ()
-bumpTickyCounterBy lbl = bumpTickyLblBy (mkCmmDataLabel rtsPackageId lbl)
+bumpTickyCounterBy lbl = bumpTickyLblBy (mkCmmDataLabel rtsPackageKey lbl)
bumpTickyCounterByE :: FastString -> CmmExpr -> FCode ()
-bumpTickyCounterByE lbl = bumpTickyLblByE (mkCmmDataLabel rtsPackageId lbl)
+bumpTickyCounterByE lbl = bumpTickyLblByE (mkCmmDataLabel rtsPackageKey lbl)
bumpTickyEntryCount :: CLabel -> FCode ()
bumpTickyEntryCount lbl = do
diff --git a/compiler/codeGen/StgCmmUtils.hs b/compiler/codeGen/StgCmmUtils.hs
index 45b0f0c785f8..3dacf00192d9 100644
--- a/compiler/codeGen/StgCmmUtils.hs
+++ b/compiler/codeGen/StgCmmUtils.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Code generator utilities; mostly monadic
@@ -71,6 +73,7 @@ import Data.List
import Data.Ord
import Data.Word
import Data.Maybe
+import Control.Arrow ( first )
-------------------------------------------------------------------------
@@ -142,7 +145,8 @@ addToMemE rep ptr n
--
-------------------------------------------------------------------------
-mkTaggedObjectLoad :: DynFlags -> LocalReg -> LocalReg -> WordOff -> DynTag -> CmmAGraph
+mkTaggedObjectLoad
+ :: DynFlags -> LocalReg -> LocalReg -> ByteOff -> DynTag -> CmmAGraph
-- (loadTaggedObjectField reg base off tag) generates assignment
-- reg = bitsK[ base + off - tag ]
-- where K is fixed by 'reg'
@@ -150,7 +154,7 @@ mkTaggedObjectLoad dflags reg base offset tag
= mkAssign (CmmLocal reg)
(CmmLoad (cmmOffsetB dflags
(CmmReg (CmmLocal base))
- (wORD_SIZE dflags * offset - tag))
+ (offset - tag))
(localRegType reg))
-------------------------------------------------------------------------
@@ -172,10 +176,10 @@ tagToClosure dflags tycon tag
--
-------------------------------------------------------------------------
-emitRtsCall :: PackageId -> FastString -> [(CmmExpr,ForeignHint)] -> Bool -> FCode ()
+emitRtsCall :: PackageKey -> FastString -> [(CmmExpr,ForeignHint)] -> Bool -> FCode ()
emitRtsCall pkg fun args safe = emitRtsCallGen [] (mkCmmCodeLabel pkg fun) args safe
-emitRtsCallWithResult :: LocalReg -> ForeignHint -> PackageId -> FastString
+emitRtsCallWithResult :: LocalReg -> ForeignHint -> PackageKey -> FastString
-> [(CmmExpr,ForeignHint)] -> Bool -> FCode ()
emitRtsCallWithResult res hint pkg fun args safe
= emitRtsCallGen [(res,hint)] (mkCmmCodeLabel pkg fun) args safe
@@ -450,11 +454,11 @@ mustFollow :: Stmt -> Stmt -> Bool
-------------------------------------------------------------------------
-emitSwitch :: CmmExpr -- Tag to switch on
- -> [(ConTagZ, CmmAGraph)] -- Tagged branches
- -> Maybe CmmAGraph -- Default branch (if any)
- -> ConTagZ -> ConTagZ -- Min and Max possible values; behaviour
- -- outside this range is undefined
+emitSwitch :: CmmExpr -- Tag to switch on
+ -> [(ConTagZ, CmmAGraphScoped)] -- Tagged branches
+ -> Maybe CmmAGraphScoped -- Default branch (if any)
+ -> ConTagZ -> ConTagZ -- Min and Max possible values; behaviour
+ -- outside this range is undefined
-> FCode ()
emitSwitch tag_expr branches mb_deflt lo_tag hi_tag
= do { dflags <- getDynFlags
@@ -464,18 +468,18 @@ emitSwitch tag_expr branches mb_deflt lo_tag hi_tag
| otherwise = False
-mkCmmSwitch :: Bool -- True <=> never generate a
- -- conditional tree
- -> CmmExpr -- Tag to switch on
- -> [(ConTagZ, CmmAGraph)] -- Tagged branches
- -> Maybe CmmAGraph -- Default branch (if any)
- -> ConTagZ -> ConTagZ -- Min and Max possible values; behaviour
- -- outside this range is undefined
+mkCmmSwitch :: Bool -- True <=> never generate a
+ -- conditional tree
+ -> CmmExpr -- Tag to switch on
+ -> [(ConTagZ, CmmAGraphScoped)] -- Tagged branches
+ -> Maybe CmmAGraphScoped -- Default branch (if any)
+ -> ConTagZ -> ConTagZ -- Min and Max possible values; behaviour
+ -- outside this range is undefined
-> FCode ()
-- First, two rather common cases in which there is no work to do
-mkCmmSwitch _ _ [] (Just code) _ _ = emit code
-mkCmmSwitch _ _ [(_,code)] Nothing _ _ = emit code
+mkCmmSwitch _ _ [] (Just code) _ _ = emit (fst code)
+mkCmmSwitch _ _ [(_,code)] Nothing _ _ = emit (fst code)
-- Right, off we go
mkCmmSwitch via_C tag_expr branches mb_deflt lo_tag hi_tag = do
@@ -631,17 +635,17 @@ mk_switch tag_expr branches mb_deflt lo_tag hi_tag via_C
is_lo (t,_) = t < mid_tag
--------------
-emitCmmLitSwitch :: CmmExpr -- Tag to switch on
- -> [(Literal, CmmAGraph)] -- Tagged branches
- -> CmmAGraph -- Default branch (always)
- -> FCode () -- Emit the code
+emitCmmLitSwitch :: CmmExpr -- Tag to switch on
+ -> [(Literal, CmmAGraphScoped)] -- Tagged branches
+ -> CmmAGraphScoped -- Default branch (always)
+ -> FCode () -- Emit the code
-- Used for general literals, whose size might not be a word,
-- where there is always a default case, and where we don't know
-- the range of values for certain. For simplicity we always generate a tree.
--
-- ToDo: for integers we could do better here, perhaps by generalising
-- mk_switch and using that. --SDM 15/09/2004
-emitCmmLitSwitch _scrut [] deflt = emit deflt
+emitCmmLitSwitch _scrut [] deflt = emit $ fst deflt
emitCmmLitSwitch scrut branches deflt = do
scrut' <- assignTemp' scrut
join_lbl <- newLabelC
@@ -682,7 +686,7 @@ mk_lit_switch scrut deflt_blk_id branches
--------------
-label_default :: BlockId -> Maybe CmmAGraph -> FCode (Maybe BlockId)
+label_default :: BlockId -> Maybe CmmAGraphScoped -> FCode (Maybe BlockId)
label_default _ Nothing
= return Nothing
label_default join_lbl (Just code)
@@ -690,7 +694,7 @@ label_default join_lbl (Just code)
return (Just lbl)
--------------
-label_branches :: BlockId -> [(a,CmmAGraph)] -> FCode [(a,BlockId)]
+label_branches :: BlockId -> [(a,CmmAGraphScoped)] -> FCode [(a,BlockId)]
label_branches _join_lbl []
= return []
label_branches join_lbl ((tag,code):branches)
@@ -699,14 +703,14 @@ label_branches join_lbl ((tag,code):branches)
return ((tag,lbl):branches')
--------------
-label_code :: BlockId -> CmmAGraph -> FCode BlockId
+label_code :: BlockId -> CmmAGraphScoped -> FCode BlockId
-- label_code J code
-- generates
-- [L: code; goto J]
-- and returns L
label_code join_lbl code = do
lbl <- newLabelC
- emitOutOfLine lbl (code <*> mkBranch join_lbl)
+ emitOutOfLine lbl (first (<*> mkBranch join_lbl) code)
return lbl
--------------
diff --git a/compiler/coreSyn/CoreArity.lhs b/compiler/coreSyn/CoreArity.lhs
index 7042718af5d3..26669b6d32b7 100644
--- a/compiler/coreSyn/CoreArity.lhs
+++ b/compiler/coreSyn/CoreArity.lhs
@@ -6,7 +6,8 @@
Arity and eta expansion
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -15,7 +16,7 @@
-- | Arit and eta expansion
module CoreArity (
- manifestArity, exprArity, exprBotStrictness_maybe,
+ manifestArity, exprArity, typeArity, exprBotStrictness_maybe,
exprEtaExpandArity, findRhsArity, CheapFun, etaExpand
) where
@@ -73,7 +74,8 @@ should have arity 3, regardless of f's arity.
\begin{code}
manifestArity :: CoreExpr -> Arity
--- ^ manifestArity sees how many leading value lambdas there are
+-- ^ manifestArity sees how many leading value lambdas there are,
+-- after looking through casts
manifestArity (Lam v e) | isId v = 1 + manifestArity e
| otherwise = manifestArity e
manifestArity (Tick t e) | not (tickishIsCode t) = manifestArity e
@@ -143,7 +145,7 @@ exprBotStrictness_maybe e
Nothing -> Nothing
Just ar -> Just (ar, sig ar)
where
- env = AE { ae_bndrs = [], ae_ped_bot = True, ae_cheap_fn = \ _ _ -> False }
+ env = AE { ae_ped_bot = True, ae_cheap_fn = \ _ _ -> False }
sig ar = mkClosedStrictSig (replicate ar topDmd) botRes
-- For this purpose we can be very simple
\end{code}
@@ -323,14 +325,10 @@ this transformation. So we try to limit it as much as possible:
going to diverge eventually anyway then getting the best arity
isn't an issue, so we might as well play safe
- (3) Do NOT move a lambda outside a case unless
+ (3) Do NOT move a lambda outside a case unless
(a) The scrutinee is ok-for-speculation, or
- (b) There is an enclosing value \x, and the scrutinee is x
- E.g. let x = case y of ( DEFAULT -> \v -> blah }
- We don't move the \y out. This is pretty arbitrary; but it
- catches the common case of doing `seq` on y.
- This is the reason for the under_lam argument to arityType.
- See Trac #5625
+ (b) more liberally: the scrutinee is cheap (e.g. a variable), and
+ -fpedantic-bottoms is not enforced (see Trac #2915 for an example)
Of course both (1) and (2) are readily defeated by disguising the bottoms.
@@ -492,8 +490,7 @@ exprEtaExpandArity dflags e
ATop oss -> length oss
ABot n -> n
where
- env = AE { ae_bndrs = []
- , ae_cheap_fn = mk_cheap_fn dflags isCheapApp
+ env = AE { ae_cheap_fn = mk_cheap_fn dflags isCheapApp
, ae_ped_bot = gopt Opt_PedanticBottoms dflags }
getBotArity :: ArityType -> Maybe Arity
@@ -562,8 +559,7 @@ rhsEtaExpandArity dflags cheap_app e
ATop [] -> 0
ABot n -> n
where
- env = AE { ae_bndrs = []
- , ae_cheap_fn = mk_cheap_fn dflags cheap_app
+ env = AE { ae_cheap_fn = mk_cheap_fn dflags cheap_app
, ae_ped_bot = gopt Opt_PedanticBottoms dflags }
has_lam (Tick _ e) = has_lam e
@@ -698,9 +694,7 @@ type CheapFun = CoreExpr -> Maybe Type -> Bool
-- of the expression; Nothing means "don't know"
data ArityEnv
- = AE { ae_bndrs :: [Id] -- Enclosing value-lambda Ids
- -- See Note [Dealing with bottom (3)]
- , ae_cheap_fn :: CheapFun
+ = AE { ae_cheap_fn :: CheapFun
, ae_ped_bot :: Bool -- True <=> be pedantic about bottoms
}
@@ -734,19 +728,14 @@ arityType _ (Var v)
-- Lambdas; increase arity
arityType env (Lam x e)
- | isId x = arityLam x (arityType env' e)
+ | isId x = arityLam x (arityType env e)
| otherwise = arityType env e
- where
- env' = env { ae_bndrs = x : ae_bndrs env }
-- Applications; decrease arity, except for types
arityType env (App fun (Type _))
= arityType env fun
arityType env (App fun arg )
- = arityApp (arityType env' fun) (ae_cheap_fn env arg Nothing)
- where
- env' = env { ae_bndrs = case ae_bndrs env of
- { [] -> []; (_:xs) -> xs } }
+ = arityApp (arityType env fun) (ae_cheap_fn env arg Nothing)
-- Case/Let; keep arity if either the expression is cheap
-- or it's a 1-shot lambda
@@ -766,17 +755,11 @@ arityType env (Case scrut _ _ alts)
| otherwise -> ABot 0 -- if RHS is bottomming
-- See Note [Dealing with bottom (2)]
- ATop as | not (ae_ped_bot env) -- Check -fpedantic-bottoms
- , is_under scrut -> ATop as
- | exprOkForSpeculation scrut -> ATop as
- | otherwise -> ATop (takeWhile isOneShotInfo as)
+ ATop as | not (ae_ped_bot env) -- See Note [Dealing with bottom (3)]
+ , ae_cheap_fn env scrut Nothing -> ATop as
+ | exprOkForSpeculation scrut -> ATop as
+ | otherwise -> ATop (takeWhile isOneShotInfo as)
where
- -- is_under implements Note [Dealing with bottom (3)]
- is_under (Var f) = f `elem` ae_bndrs env
- is_under (App f (Type {})) = is_under f
- is_under (Cast f _) = is_under f
- is_under _ = False
-
alts_type = foldr1 andArityType [arityType env rhs | (_,_,rhs) <- alts]
arityType env (Let b e)
@@ -1025,4 +1008,3 @@ freshEtaId n subst ty
mkSysLocal (fsLit "eta") (mkBuiltinUnique n) ty'
subst' = extendTvInScope subst eta_id'
\end{code}
-
diff --git a/compiler/coreSyn/CoreFVs.lhs b/compiler/coreSyn/CoreFVs.lhs
index 636c049c4254..51b5f34d2690 100644
--- a/compiler/coreSyn/CoreFVs.lhs
+++ b/compiler/coreSyn/CoreFVs.lhs
@@ -5,6 +5,8 @@
Taken quite directly from the Peyton Jones/Lester paper.
\begin{code}
+{-# LANGUAGE CPP #-}
+
-- | A module concerned with finding the free variables of an expression.
module CoreFVs (
-- * Free variables of expressions and binding groups
@@ -254,7 +256,7 @@ exprOrphNames e
go (Coercion co) = orphNamesOfCo co
go (App e1 e2) = go e1 `unionNameSets` go e2
go (Lam v e) = go e `delFromNameSet` idName v
- go (Tick _ e) = go e
+ go (Tick _ e) = go e
go (Cast e co) = go e `unionNameSets` orphNamesOfCo co
go (Let (NonRec _ r) e) = go e `unionNameSets` go r
go (Let (Rec prs) e) = exprsOrphNames (map snd prs) `unionNameSets` go e
diff --git a/compiler/coreSyn/CoreLint.lhs b/compiler/coreSyn/CoreLint.lhs
index 836164e0ce9f..f4607823a8f4 100644
--- a/compiler/coreSyn/CoreLint.lhs
+++ b/compiler/coreSyn/CoreLint.lhs
@@ -7,19 +7,13 @@
A ``lint'' pass to check for Core correctness
\begin{code}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
+{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -fprof-auto #-}
module CoreLint ( lintCoreBindings, lintUnfolding, lintExpr ) where
#include "HsVersions.h"
-import Demand
import CoreSyn
import CoreFVs
import CoreUtils
@@ -213,7 +207,8 @@ lintSingleBinding top_lvl_flag rec_flag (binder,rhs)
; binder_ty <- applySubstTy binder_ty
; checkTys binder_ty ty (mkRhsMsg binder (ptext (sLit "RHS")) ty)
- -- Check (not isUnLiftedType) (also checks for bogus unboxed tuples)
+ -- Check the let/app invariant
+ -- See Note [CoreSyn let/app invariant] in CoreSyn
; checkL (not (isUnLiftedType binder_ty)
|| (isNonRec rec_flag && exprOkForSpeculation rhs))
(mkRhsPrimMsg binder rhs)
@@ -226,6 +221,7 @@ lintSingleBinding top_lvl_flag rec_flag (binder,rhs)
-- Check that if the binder is local, it is not marked as exported
; checkL (not (isExportedId binder) || isTopLevel top_lvl_flag)
(mkNonTopExportedMsg binder)
+
-- Check that if the binder is local, it does not have an external name
; checkL (not (isExternalName (Var.varName binder)) || isTopLevel top_lvl_flag)
(mkNonTopExternalNameMsg binder)
@@ -239,9 +235,13 @@ lintSingleBinding top_lvl_flag rec_flag (binder,rhs)
-- Check whether arity and demand type are consistent (only if demand analysis
-- already happened)
- ; checkL (case dmdTy of
- StrictSig dmd_ty -> idArity binder >= dmdTypeDepth dmd_ty || exprIsTrivial rhs)
- (mkArityMsg binder)
+ --
+ -- Note (Apr 2014): this is actually ok. See Note [Demand analysis for trivial right-hand sides]
+ -- in DmdAnal. After eta-expansion in CorePrep the rhs is no longer trivial.
+ -- ; let dmdTy = idStrictness binder
+ -- ; checkL (case dmdTy of
+ -- StrictSig dmd_ty -> idArity binder >= dmdTypeDepth dmd_ty || exprIsTrivial rhs)
+ -- (mkArityMsg binder)
; lintIdUnfolding binder binder_ty (idUnfolding binder) }
@@ -249,7 +249,6 @@ lintSingleBinding top_lvl_flag rec_flag (binder,rhs)
-- the unfolding is a SimplifiableCoreExpr. Give up for now.
where
binder_ty = idType binder
- dmdTy = idStrictness binder
bndr_vars = varSetElems (idFreeVars binder)
-- If you edit this function, you may need to update the GHC formalism
@@ -454,6 +453,8 @@ lintCoreArg fun_ty (Type arg_ty)
lintCoreArg fun_ty arg
= do { arg_ty <- lintCoreExpr arg
+ ; checkL (not (isUnLiftedType arg_ty) || exprOkForSpeculation arg)
+ (mkLetAppMsg arg)
; lintValApp arg fun_ty arg_ty }
-----------------
@@ -854,6 +855,9 @@ lintCoercion co@(TyConAppCo r tc cos)
; checkRole co2 r r2
; return (rk, mkFunTy s1 s2, mkFunTy t1 t2, r) }
+ | Just {} <- synTyConDefn_maybe tc
+ = failWithL (ptext (sLit "Synonym in TyConAppCo:") <+> ppr co)
+
| otherwise
= do { (ks,ss,ts,rs) <- mapAndUnzip4M lintCoercion cos
; rk <- lint_co_app co (tyConKind tc) (ss `zip` ks)
@@ -1391,6 +1395,11 @@ mkRhsMsg binder what ty
hsep [ptext (sLit "Binder's type:"), ppr (idType binder)],
hsep [ptext (sLit "Rhs type:"), ppr ty]]
+mkLetAppMsg :: CoreExpr -> MsgDoc
+mkLetAppMsg e
+ = hang (ptext (sLit "This argument does not satisfy the let/app invariant:"))
+ 2 (ppr e)
+
mkRhsPrimMsg :: Id -> CoreExpr -> MsgDoc
mkRhsPrimMsg binder _rhs
= vcat [hsep [ptext (sLit "The type of this binder is primitive:"),
@@ -1421,6 +1430,7 @@ mkKindErrMsg tyvar arg_ty
hang (ptext (sLit "Arg type:"))
4 (ppr arg_ty <+> dcolon <+> ppr (typeKind arg_ty))]
+{- Not needed now
mkArityMsg :: Id -> MsgDoc
mkArityMsg binder
= vcat [hsep [ptext (sLit "Demand type has"),
@@ -1433,7 +1443,7 @@ mkArityMsg binder
]
where (StrictSig dmd_ty) = idStrictness binder
-
+-}
mkCastErr :: CoreExpr -> Coercion -> Type -> Type -> MsgDoc
mkCastErr expr co from_ty expr_ty
= vcat [ptext (sLit "From-type of Cast differs from type of enclosed expression"),
diff --git a/compiler/coreSyn/CorePrep.lhs b/compiler/coreSyn/CorePrep.lhs
index 5e0cd6599d27..4baac8cd0101 100644
--- a/compiler/coreSyn/CorePrep.lhs
+++ b/compiler/coreSyn/CorePrep.lhs
@@ -5,7 +5,7 @@
Core pass to saturate constructors and PrimOps
\begin{code}
-{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE BangPatterns, CPP #-}
module CorePrep (
corePrepPgm, corePrepExpr, cvtLitInteger,
@@ -53,6 +53,8 @@ import Outputable
import Platform
import FastString
import Config
+import Name ( NamedThing(..), nameSrcSpan )
+import SrcLoc ( SrcSpan(..), realSrcLocSpan, mkRealSrcLoc )
import Data.Bits
import Data.List ( mapAccumL )
import Control.Monad
@@ -113,6 +115,9 @@ The goal of this pass is to prepare for code generation.
special case where we use the S# constructor for Integers that
are in the range of Int.
+11. Uphold tick consistency while doing this: Move floatable ticks
+ out of applications, and wrap floats for scoped ticks.
+
This is all done modulo type applications and abstractions, so that
when type erasure is done for conversion to STG, we don't end up with
any trivial or useless bindings.
@@ -157,13 +162,14 @@ type CpeRhs = CoreExpr -- Non-terminal 'rhs'
%************************************************************************
\begin{code}
-corePrepPgm :: DynFlags -> HscEnv -> CoreProgram -> [TyCon] -> IO CoreProgram
-corePrepPgm dflags hsc_env binds data_tycons = do
+corePrepPgm :: HscEnv -> ModLocation -> CoreProgram -> [TyCon] -> IO CoreProgram
+corePrepPgm hsc_env mod_loc binds data_tycons = do
+ let dflags = hsc_dflags hsc_env
showPass dflags "CorePrep"
us <- mkSplitUniqSupply 's'
initialCorePrepEnv <- mkInitialCorePrepEnv dflags hsc_env
- let implicit_binds = mkDataConWorkers data_tycons
+ let implicit_binds = mkDataConWorkers dflags mod_loc data_tycons
-- NB: we must feed mkImplicitBinds through corePrep too
-- so that they are suitably cloned and eta-expanded
@@ -173,7 +179,13 @@ corePrepPgm dflags hsc_env binds data_tycons = do
return (deFloatTop (floats1 `appendFloats` floats2))
endPass hsc_env CorePrep binds_out []
- return binds_out
+
+ -- Output of Core preparation might be useful for debugging
+ let binds_out'
+ | gopt Opt_DebugCore dflags = map annotateCoreNotes binds_out
+ | otherwise = binds_out
+
+ return binds_out'
corePrepExpr :: DynFlags -> HscEnv -> CoreExpr -> IO CoreExpr
corePrepExpr dflags hsc_env expr = do
@@ -194,13 +206,27 @@ corePrepTopBinds initialCorePrepEnv binds
binds' <- go env' binds
return (bind' `appendFloats` binds')
-mkDataConWorkers :: [TyCon] -> [CoreBind]
+mkDataConWorkers :: DynFlags -> ModLocation -> [TyCon] -> [CoreBind]
-- See Note [Data constructor workers]
-mkDataConWorkers data_tycons
- = [ NonRec id (Var id) -- The ice is thin here, but it works
+-- c.f. Note [Injecting implicit bindings] in TidyPgm
+mkDataConWorkers dflags mod_loc data_tycons
+ = [ NonRec id (tick_it (getName data_con) (Var id))
+ -- The ice is thin here, but it works
| tycon <- data_tycons, -- CorePrep will eta-expand it
data_con <- tyConDataCons tycon,
- let id = dataConWorkId data_con ]
+ let id = dataConWorkId data_con
+ ]
+ where
+ -- If debugging, we try to put a source note on the worker. This is
+ -- useful especially for heap profiling.
+ tick_it name
+ | not (gopt Opt_Debug dflags) = id
+ | RealSrcSpan span <- nameSrcSpan name = Tick (SourceNote span nameStr)
+ | Just file <- ml_hs_file mod_loc = Tick (SourceNote (dummySpan file) nameStr)
+ | otherwise = Tick (SourceNote (dummySpan "???") nameStr)
+ where nameStr = showSDoc dflags (ppr name)
+ dummySpan file = realSrcLocSpan $ mkRealSrcLoc (mkFastString file) 1 1
+
\end{code}
Note [Floating out of top level bindings]
@@ -387,7 +413,7 @@ cpePair top_lvl is_rec dmd is_unlifted env bndr rhs
; (floats2, rhs2) <- float_from_rhs floats1 rhs1
-- Make the arity match up
- ; (floats3, rhs')
+ ; (floats3, rhs3)
<- if manifestArity rhs1 <= arity
then return (floats2, cpeEtaExpand arity rhs2)
else WARN(True, text "CorePrep: silly extra arguments:" <+> ppr bndr)
@@ -397,15 +423,18 @@ cpePair top_lvl is_rec dmd is_unlifted env bndr rhs
; return ( addFloat floats2 float
, cpeEtaExpand arity (Var v)) })
+ -- Wrap floating ticks
+ ; let (floats4, rhs4) = wrapTicks floats3 rhs3
+
-- Record if the binder is evaluated
-- and otherwise trim off the unfolding altogether
-- It's not used by the code generator; getting rid of it reduces
-- heap usage and, since we may be changing uniques, we'd have
-- to substitute to keep it right
- ; let bndr' | exprIsHNF rhs' = bndr `setIdUnfolding` evaldUnfolding
+ ; let bndr' | exprIsHNF rhs3 = bndr `setIdUnfolding` evaldUnfolding
| otherwise = bndr `setIdUnfolding` noUnfolding
- ; return (floats3, bndr', rhs') }
+ ; return (floats4, bndr', rhs4) }
where
is_strict_or_unlifted = (isStrictDmd dmd) || is_unlifted
@@ -494,11 +523,13 @@ cpeRhsE env (Let bind expr)
; return (new_binds `appendFloats` floats, body) }
cpeRhsE env (Tick tickish expr)
- | ignoreTickish tickish
- = cpeRhsE env expr
- | otherwise -- Just SCCs actually
+ | tickishPlace tickish == PlaceNonLam && tickish `tickishScopesLike` SoftScope
+ = do { (floats, body) <- cpeRhsE env expr
+ -- See [Floating Ticks in CorePrep]
+ ; return (unitFloat (FloatTick tickish) `appendFloats` floats, body) }
+ | otherwise
= do { body <- cpeBodyNF env expr
- ; return (emptyFloats, Tick tickish' body) }
+ ; return (emptyFloats, mkTick tickish' body) }
where
tickish' | Breakpoint n fvs <- tickish
= Breakpoint n (map (lookupCorePrepEnv env) fvs)
@@ -577,9 +608,9 @@ rhsToBody :: CpeRhs -> UniqSM (Floats, CpeBody)
-- Remove top level lambdas by let-binding
rhsToBody (Tick t expr)
- | not (tickishScoped t) -- we can only float out of non-scoped annotations
+ | tickishScoped t == NoScope -- we can only float out of non-scoped annotations
= do { (floats, expr') <- rhsToBody expr
- ; return (floats, Tick t expr') }
+ ; return (floats, mkTick t expr') }
rhsToBody (Cast e co)
-- You can get things like
@@ -679,8 +710,10 @@ cpeApp env expr
; return (Cast fun' co, hd, ty2, floats, ss) }
collect_args (Tick tickish fun) depth
- | ignoreTickish tickish -- Drop these notes altogether
- = collect_args fun depth -- They aren't used by the code generator
+ | tickishPlace tickish == PlaceNonLam && tickish `tickishScopesLike` SoftScope
+ = do { (fun',hd,fun_ty,floats,ss) <- collect_args fun depth
+ -- See [Floating Ticks in CorePrep]
+ ; return (fun',hd,fun_ty,addFloat floats (FloatTick tickish),ss) }
-- N-variable fun, better let-bind it
collect_args fun depth
@@ -801,10 +834,6 @@ of the scope of a `seq`, or dropped the `seq` altogether.
%************************************************************************
\begin{code}
--- we don't ignore any Tickishes at the moment.
-ignoreTickish :: Tickish Id -> Bool
-ignoreTickish _ = False
-
cpe_ExprIsTrivial :: CoreExpr -> Bool
-- Version that doesn't consider an scc annotation to be trivial.
cpe_ExprIsTrivial (Var _) = True
@@ -908,6 +937,9 @@ tryEtaReducePrep bndrs (Let bind@(NonRec _ r) body)
where
fvs = exprFreeVars r
+tryEtaReducePrep bndrs (Tick tickish e)
+ = fmap (mkTick tickish) $ tryEtaReducePrep bndrs e
+
tryEtaReducePrep _ _ = Nothing
\end{code}
@@ -932,11 +964,14 @@ data FloatingBind
Id CpeBody
Bool -- The bool indicates "ok-for-speculation"
+ | FloatTick (Tickish Id)
+
data Floats = Floats OkToSpec (OrdList FloatingBind)
instance Outputable FloatingBind where
ppr (FloatLet b) = ppr b
ppr (FloatCase b r ok) = brackets (ppr ok) <+> ppr b <+> equals <+> ppr r
+ ppr (FloatTick t) = ppr t
instance Outputable Floats where
ppr (Floats flag fs) = ptext (sLit "Floats") <> brackets (ppr flag) <+>
@@ -982,6 +1017,7 @@ wrapBinds (Floats _ binds) body
where
mk_bind (FloatCase bndr rhs _) body = Case rhs bndr (exprType body) [(DEFAULT, [], body)]
mk_bind (FloatLet bind) body = Let bind body
+ mk_bind (FloatTick tickish) body = mkTick tickish body
addFloat :: Floats -> FloatingBind -> Floats
addFloat (Floats ok_to_spec floats) new_float
@@ -991,6 +1027,7 @@ addFloat (Floats ok_to_spec floats) new_float
check (FloatCase _ _ ok_for_spec)
| ok_for_spec = IfUnboxedOk
| otherwise = NotOkToSpec
+ check FloatTick{} = OkToSpec
-- The ok-for-speculation flag says that it's safe to
-- float this Case out of a let, and thereby do it more eagerly
-- We need the top-level flag because it's never ok to float
@@ -1114,9 +1151,9 @@ data CorePrepEnv = CPE {
lookupMkIntegerName :: DynFlags -> HscEnv -> IO Id
lookupMkIntegerName dflags hsc_env
- = if thisPackage dflags == primPackageId
+ = if thisPackage dflags == primPackageKey
then return $ panic "Can't use Integer in ghc-prim"
- else if thisPackage dflags == integerPackageId
+ else if thisPackage dflags == integerPackageKey
then return $ panic "Can't use Integer in integer"
else liftM tyThingId
$ initTcForLookup hsc_env (tcLookupGlobal mkIntegerName)
@@ -1191,4 +1228,51 @@ newVar ty
= seqType ty `seq` do
uniq <- getUniqueM
return (mkSysLocal (fsLit "sat") uniq ty)
+
+
+------------------------------------------------------------------------------
+-- Floating ticks
+-- ---------------------------------------------------------------------------
+--
+-- Note [Floating Ticks in CorePrep]
+--
+-- It might seem counter-intuitive to float ticks by default, given
+-- that we don't actually want to move them if we can help it. On the
+-- other hand, nothing gets very far in CorePrep anyway, and we want
+-- to preserve the order of let bindings and tick annotations in
+-- relation to each other. For example, if we just wrapped let floats
+-- when they pass through ticks, we might end up performing the
+-- following transformation:
+--
+-- src<...> let foo = bar in baz
+-- ==> let foo = src<...> bar in src<...> baz
+--
+-- Because the let-binding would float through the tick, and then
+-- immediately materialize, achieving nothing but decreasing tick
+-- accuracy. The only special case is the following scenario:
+--
+-- let foo = src<...> (let a = b in bar) in baz
+-- ==> let foo = src<...> bar; a = src<...> b in baz
+--
+-- Here we would not want the source tick to end up covering "baz" and
+-- therefore refrain from pushing ticks outside. Instead, we copy them
+-- into the floating binds (here "a") in cpePair. Note that where "b"
+-- or "bar" are (value) lambdas we have to push the annotations
+-- further inside in order to uphold our rules.
+--
+-- All of this is implemented below in @wrapTicks@.
+
+-- | Like wrapFloats, but only wraps tick floats
+wrapTicks :: Floats -> CoreExpr -> (Floats, CoreExpr)
+wrapTicks (Floats flag floats0) expr = (Floats flag floats1, expr')
+ where (floats1, expr') = foldrOL go (nilOL, expr) floats0
+ go (FloatTick t) (fs, e) = ASSERT(tickishPlace t == PlaceNonLam)
+ (mapOL (wrap t) fs, mkTick t e)
+ go other (fs, e) = (other `consOL` fs, e)
+ wrap t (FloatLet bind) = FloatLet (wrapBind t bind)
+ wrap t (FloatCase b r ok) = FloatCase b (mkTick t r) ok
+ wrap _ other = pprPanic "wrapTicks: unexpected float!" (ppr other)
+ wrapBind t (NonRec binder rhs) = NonRec binder (mkTick t rhs)
+ wrapBind t (Rec pairs) = Rec (mapSnd (mkTick t) pairs)
+
\end{code}
diff --git a/compiler/coreSyn/CoreSubst.lhs b/compiler/coreSyn/CoreSubst.lhs
index fef3e86a2e54..5ab82fa9aeee 100644
--- a/compiler/coreSyn/CoreSubst.lhs
+++ b/compiler/coreSyn/CoreSubst.lhs
@@ -6,7 +6,8 @@
Utility functions on @Core@ syntax
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -23,7 +24,7 @@ module CoreSubst (
substTy, substCo, substExpr, substExprSC, substBind, substBindSC,
substUnfolding, substUnfoldingSC,
lookupIdSubst, lookupTvSubst, lookupCvSubst, substIdOcc,
- substTickish,
+ substTickish, substVarSet,
-- ** Operations on substitutions
emptySubst, mkEmptySubst, mkSubst, mkOpenSubst, substInScope, isEmptySubst,
@@ -40,7 +41,7 @@ module CoreSubst (
-- ** Simple expression optimiser
simpleOptPgm, simpleOptExpr, simpleOptExprWith,
- exprIsConApp_maybe, exprIsLiteral_maybe
+ exprIsConApp_maybe, exprIsLiteral_maybe, exprIsLambda_maybe,
) where
#include "HsVersions.h"
@@ -61,7 +62,7 @@ import Coercion hiding ( substTy, substCo, extendTvSubst, substTyVarBndr, substC
import TyCon ( tyConArity )
import DataCon
-import PrelNames ( eqBoxDataConKey )
+import PrelNames ( eqBoxDataConKey, coercibleDataConKey )
import OptCoercion ( optCoercion )
import PprCore ( pprCoreBindings, pprRules )
import Module ( Module )
@@ -355,7 +356,7 @@ instance Outputable Subst where
%************************************************************************
\begin{code}
--- | Apply a substititon to an entire 'CoreExpr'. Rememeber, you may only
+-- | Apply a substitution to an entire 'CoreExpr'. Remember, you may only
-- apply the substitution /once/: see "CoreSubst#apply_once"
--
-- Do *not* attempt to short-cut in the case of an empty substitution!
@@ -402,8 +403,8 @@ subst_expr subst expr
where
(subst', bndrs') = substBndrs subst bndrs
--- | Apply a substititon to an entire 'CoreBind', additionally returning an updated 'Subst'
--- that should be used by subsequent substitutons.
+-- | Apply a substitution to an entire 'CoreBind', additionally returning an updated 'Subst'
+-- that should be used by subsequent substitutions.
substBind, substBindSC :: Subst -> CoreBind -> (Subst, CoreBind)
substBindSC subst bind -- Short-cut if the substitution is empty
@@ -460,7 +461,7 @@ preserve occ info in rules.
\begin{code}
-- | Substitutes a 'Var' for another one according to the 'Subst' given, returning
--- the result and an updated 'Subst' that should be used by subsequent substitutons.
+-- the result and an updated 'Subst' that should be used by subsequent substitutions.
-- 'IdInfo' is preserved by this process, although it is substituted into appropriately.
substBndr :: Subst -> Var -> (Subst, Var)
substBndr subst bndr
@@ -484,7 +485,7 @@ substRecBndrs subst bndrs
\begin{code}
substIdBndr :: SDoc
-> Subst -- ^ Substitution to use for the IdInfo
- -> Subst -> Id -- ^ Substitition and Id to transform
+ -> Subst -> Id -- ^ Substitution and Id to transform
-> (Subst, Id) -- ^ Transformed pair
-- NB: unfolding may be zapped
@@ -555,7 +556,7 @@ cloneRecIdBndrs subst us ids
-- Just like substIdBndr, except that it always makes a new unique
-- It is given the unique to use
clone_id :: Subst -- Substitution for the IdInfo
- -> Subst -> (Id, Unique) -- Substitition and Id to transform
+ -> Subst -> (Id, Unique) -- Substitution and Id to transform
-> (Subst, Id) -- Transformed pair
clone_id rec_subst subst@(Subst in_scope idvs tvs cvs) (old_id, uniq)
@@ -966,6 +967,14 @@ simple_app subst (Lam b e) (a:as)
where
(subst', b') = subst_opt_bndr subst b
b2 = add_info subst' b b'
+simple_app subst (Var v) as
+ | isCompulsoryUnfolding (idUnfolding v)
+ -- See Note [Unfold compulsory unfoldings in LHSs]
+ = simple_app subst (unfoldingTemplate (idUnfolding v)) as
+simple_app subst (Tick t e) as
+ -- Okay to do "(Tick t e) x ==> Tick t (e x)"?
+ | t `tickishScopesLike` SoftScope
+ = mkTick t $ simple_app subst e as
simple_app subst e as
= foldl App (simple_opt_expr subst e) as
@@ -1039,7 +1048,7 @@ maybe_substitute subst b r
trivial | exprIsTrivial r = True
| (Var fun, args) <- collectArgs r
, Just dc <- isDataConWorkId_maybe fun
- , dc `hasKey` eqBoxDataConKey
+ , dc `hasKey` eqBoxDataConKey || dc `hasKey` coercibleDataConKey
, all exprIsTrivial args = True -- See Note [Optimise coercion boxes agressively]
| otherwise = False
@@ -1112,6 +1121,12 @@ we don't know what phase we're in. Here's an example
When inlining 'foo' in 'bar' we want the let-binding for 'inner'
to remain visible until Phase 1
+Note [Unfold compulsory unfoldings in LHSs]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When the user writes `map coerce = coerce` as a rule, the rule will only ever
+match if we replace coerce by its unfolding on the LHS, because that is the
+core that the rule matching engine will find. So do that for everything that
+has a compulsory unfolding. Also see Note [Desugaring coerce as cast] in Desugar
%************************************************************************
%* *
@@ -1289,4 +1304,77 @@ exprIsLiteral_maybe env@(_, id_unf) e
Var v | Just rhs <- expandUnfolding_maybe (id_unf v)
-> exprIsLiteral_maybe env rhs
_ -> Nothing
-\end{code}
+\end{code}
+
+Note [exprIsLambda_maybe]
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+exprIsLambda_maybe will, given an expression `e`, try to turn it into the form
+`Lam v e'` (returned as `Just (v,e')`). Besides using lambdas, it looks through
+casts (using the Push rule), and it unfolds function calls if the unfolding
+has a greater arity than arguments are present.
+
+Currently, it is used in Rules.match, and is required to make
+"map coerce = coerce" match.
+
+\begin{code}
+exprIsLambda_maybe :: InScopeEnv -> CoreExpr -> Maybe (Var, CoreExpr)
+ -- See Note [exprIsLambda_maybe]
+
+-- The simple case: It is a lambda already
+exprIsLambda_maybe _ (Lam x e)
+ = Just (x, e)
+
+-- Also possible: A casted lambda. Push the coercion inside
+exprIsLambda_maybe (in_scope_set, id_unf) (Cast casted_e co)
+ | Just (x, e) <- exprIsLambda_maybe (in_scope_set, id_unf) casted_e
+ -- Only do value lambdas.
+ -- this implies that x is not in scope in gamma (makes this code simpler)
+ , not (isTyVar x) && not (isCoVar x)
+ , ASSERT( not $ x `elemVarSet` tyCoVarsOfCo co) True
+ , let res = pushCoercionIntoLambda in_scope_set x e co
+ = -- pprTrace "exprIsLambda_maybe:Cast" (vcat [ppr casted_e, ppr co, ppr res])
+ res
+
+-- Another attempt: See if we find a partial unfolding
+exprIsLambda_maybe (in_scope_set, id_unf) e
+ | (Var f, as) <- collectArgs e
+ , let unfolding = id_unf f
+ , Just rhs <- expandUnfolding_maybe unfolding
+ -- Make sure there is hope to get a lambda
+ , unfoldingArity unfolding > length (filter isValArg as)
+ -- Optimize, for beta-reduction
+ , let e' = simpleOptExprWith (mkEmptySubst in_scope_set) (rhs `mkApps` as)
+ -- Recurse, because of possible casts
+ , Just (x', e'') <- exprIsLambda_maybe (in_scope_set, id_unf) e'
+ , let res = Just (x', e'')
+ = -- pprTrace "exprIsLambda_maybe:Unfold" (vcat [ppr e, ppr res])
+ res
+
+exprIsLambda_maybe _ _e
+ = -- pprTrace "exprIsLambda_maybe:Fail" (vcat [ppr _e])
+ Nothing
+
+
+pushCoercionIntoLambda
+ :: InScopeSet -> Var -> CoreExpr -> Coercion -> Maybe (Var, CoreExpr)
+pushCoercionIntoLambda in_scope x e co
+ -- This implements the Push rule from the paper on coercions
+ -- Compare with simplCast in Simplify
+ | ASSERT(not (isTyVar x) && not (isCoVar x)) True
+ , Pair s1s2 t1t2 <- coercionKind co
+ , Just (_s1,_s2) <- splitFunTy_maybe s1s2
+ , Just (t1,_t2) <- splitFunTy_maybe t1t2
+ = let [co1, co2] = decomposeCo 2 co
+ -- Should we optimize the coercions here?
+ -- Otherwise they might not match too well
+ x' = x `setIdType` t1
+ in_scope' = in_scope `extendInScopeSet` x'
+ subst = extendIdSubst (mkEmptySubst in_scope')
+ x
+ (mkCast (Var x') co1)
+ in Just (x', subst_expr subst e `mkCast` co2)
+ | otherwise
+ = pprTrace "exprIsLambda_maybe: Unexpected lambda in case" (ppr (Lam x e))
+ Nothing
+
+\end{code}
diff --git a/compiler/coreSyn/CoreSyn.lhs b/compiler/coreSyn/CoreSyn.lhs
index 3dc8eeb31f7a..782697a24390 100644
--- a/compiler/coreSyn/CoreSyn.lhs
+++ b/compiler/coreSyn/CoreSyn.lhs
@@ -4,9 +4,8 @@
%
\begin{code}
-{-# LANGUAGE DeriveDataTypeable, DeriveFunctor #-}
-
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, DeriveDataTypeable, DeriveFunctor #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -16,9 +15,10 @@
-- | CoreSyn holds all the main data types for use by for the Glasgow Haskell Compiler midsection
module CoreSyn (
-- * Main data types
- Expr(..), Alt, Bind(..), AltCon(..), Arg, Tickish(..),
+ Expr(..), Alt, Bind(..), AltCon(..), Arg,
+ Tickish(..), RawTickish, TickishScoping(..), TickishPlacement(..), ExprPtr(..),
CoreProgram, CoreExpr, CoreAlt, CoreBind, CoreArg, CoreBndr,
- TaggedExpr, TaggedAlt, TaggedBind, TaggedArg, TaggedBndr(..),
+ TaggedExpr, TaggedAlt, TaggedBind, TaggedArg, TaggedBndr(..), deTagExpr,
-- ** 'Expr' construction
mkLets, mkLams,
@@ -44,8 +44,11 @@ module CoreSyn (
isValArg, isTypeArg, isTyCoArg, valArgCount, valBndrCount,
isRuntimeArg, isRuntimeVar,
- tickishCounts, tickishScoped, tickishIsCode, mkNoCount, mkNoScope,
- tickishCanSplit,
+ tickishCounts, tickishScoped, tickishScopesLike, tickishFloatable,
+ tickishCanSplit, mkNoCount, mkNoScope,
+ tickishIsCode, tickishPlace,
+ exprPtrCons,
+ tickishContains,
-- * Unfolding data types
Unfolding(..), UnfoldingGuidance(..), UnfoldingSource(..),
@@ -105,6 +108,7 @@ import DynFlags
import FastString
import Outputable
import Util
+import SrcLoc ( RealSrcSpan, containsSpan )
import Data.Data hiding (TyCon)
import Data.Int
@@ -181,25 +185,8 @@ These data types are the heart of the compiler
-- /must/ be of lifted type (see "Type#type_classification" for
-- the meaning of /lifted/ vs. /unlifted/).
--
--- #let_app_invariant#
--- The right hand side of of a non-recursive 'Let'
--- _and_ the argument of an 'App',
--- /may/ be of unlifted type, but only if the expression
--- is ok-for-speculation. This means that the let can be floated
--- around without difficulty. For example, this is OK:
---
--- > y::Int# = x +# 1#
---
--- But this is not, as it may affect termination if the
--- expression is floated out:
---
--- > y::Int# = fac 4#
---
--- In this situation you should use @case@ rather than a @let@. The function
--- 'CoreUtils.needsCaseBinding' can help you determine which to generate, or
--- alternatively use 'MkCore.mkCoreLet' rather than this constructor directly,
--- which will generate a @case@ if necessary
---
+-- See Note [CoreSyn let/app invariant]
+--
-- #type_let#
-- We allow a /non-recursive/ let to bind a type variable, thus:
--
@@ -360,9 +347,28 @@ See #letrec_invariant#
Note [CoreSyn let/app invariant]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-See #let_app_invariant#
+The let/app invariant
+ the right hand side of a non-recursive 'Let', and
+ the argument of an 'App',
+ /may/ be of unlifted type, but only if
+ the expression is ok-for-speculation.
+
+This means that the let can be floated around
+without difficulty. For example, this is OK:
-This is intially enforced by DsUtils.mkCoreLet and mkCoreApp
+ y::Int# = x +# 1#
+
+But this is not, as it may affect termination if the
+expression is floated out:
+
+ y::Int# = fac 4#
+
+In this situation you should use @case@ rather than a @let@. The function
+'CoreUtils.needsCaseBinding' can help you determine which to generate, or
+alternatively use 'MkCore.mkCoreLet' rather than this constructor directly,
+which will generate a @case@ if necessary
+
+The let/app invariant is initially enforced by DsUtils.mkCoreLet and mkCoreApp
Note [CoreSyn case invariants]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -395,7 +401,7 @@ Here's another example:
f :: T -> Bool
f = \(x:t). case x of Bool {}
Since T has no data constructors, the case alternatives are of course
-empty. However note that 'x' is not bound to a visbily-bottom value;
+empty. However note that 'x' is not bound to a visibly-bottom value;
it's the *type* that tells us it's going to diverge. Its a bit of a
degnerate situation but we do NOT want to replace
case x of Bool {} --> error Bool "Inaccessible case"
@@ -472,8 +478,48 @@ data Tickish id =
-- Note [substTickish] in CoreSubst.
}
+ -- | A source note.
+ --
+ -- Source notes are pure annotations: Their presence should neither
+ -- influence compilation nor execution. The semantics are given by
+ -- causality: The presence of a source note means that a local
+ -- change in the referenced source code span will possibly provoke
+ -- the generated code to change. On the flip-side, the functionality
+ -- of annotated code *must* be invariant against changes to all
+ -- source code *except* the spans referenced in the source notes
+ -- (see "Causality of optimized Haskell" paper for details).
+ --
+ -- Therefore extending the scope of any given source note is always
+ -- valid. Note that it is still undesirable though, as this reduces
+ -- their usefulness for debugging and profiling. Therefore we will
+ -- generally try only to make use of this property where it is
+ -- necessary to enable optimizations.
+ | SourceNote
+ { sourceSpan :: RealSrcSpan -- ^ Source covered
+ , sourceName :: String -- ^ Name for source location
+ -- (uses same names as CCs)
+ }
+
+ -- | A core note. These types of ticks only live after Core2Stg and
+ -- carry the core that a piece of Stg was generated from.
+ | CoreNote
+ { coreBind :: Var -- ^ Name the core fragment is bound to
+ , coreNote :: ExprPtr Var -- ^ Source covered
+ }
+
deriving (Eq, Ord, Data, Typeable)
+-- | Tickish out of Core context
+type RawTickish = Tickish ()
+
+-- | Pointer to a Core expression or case alternative. Ignored for the
+-- purpose of equality checks.
+data ExprPtr id = ExprPtr (Expr id)
+ | AltPtr (Alt id)
+ deriving (Data, Typeable)
+instance Eq (ExprPtr id) where _ == _ = True
+instance Ord (ExprPtr id) where compare _ _ = EQ
+
-- | A "counting tick" (where tickishCounts is True) is one that
-- counts evaluations in some way. We cannot discard a counting tick,
@@ -483,41 +529,210 @@ data Tickish id =
-- However, we still allow the simplifier to increase or decrease
-- sharing, so in practice the actual number of ticks may vary, except
-- that we never change the value from zero to non-zero or vice versa.
---
tickishCounts :: Tickish id -> Bool
tickishCounts n@ProfNote{} = profNoteCount n
tickishCounts HpcTick{} = True
tickishCounts Breakpoint{} = True
-
-tickishScoped :: Tickish id -> Bool
-tickishScoped n@ProfNote{} = profNoteScope n
-tickishScoped HpcTick{} = False
-tickishScoped Breakpoint{} = True
+tickishCounts _ = False
+
+
+-- | Specifies the scoping behaviour of ticks. This governs the
+-- behaviour of ticks that care about the covered code and the cost
+-- associated with it. Important for ticks relating to profiling.
+data TickishScoping =
+ -- | No scoping: The tick does not care about what code it
+ -- covers. Transformations can freely move code inside as well as
+ -- outside without any additional annotation obligations
+ NoScope
+
+ -- | Soft scoping: We want all code that is covered to stay
+ -- covered. Note that this scope type does not forbid
+ -- transformations from happening, as long as all results of
+ -- the transformations are still covered by this tick or a copy of
+ -- it. For example
+ --
+ -- let x = tick<...> (let y = foo in bar) in baz
+ -- ===>
+ -- let x = tick<...> bar; y = tick<...> foo in baz
+ --
+ -- Is a valid transformation as far as "bar" and "foo" are
+ -- concerned, because both still are scoped over by the tick.
+ --
+ -- Note though that one might object to the "let" not being
+ -- covered by the tick any more. However, we are generally lax
+ -- with this - constant costs don't matter too much, and given
+ -- that the "let" was effectively merged we can view it as having
+ -- lost its identity anyway.
+ --
+ -- Also note that this scoping behaviour allows floating a tick
+ -- "upwards" in pretty much any situation. For example:
+ --
+ -- case foo of x -> tick<...> bar
+ -- ==>
+ -- tick<...> case foo of x -> bar
+ --
+ -- While this is always legal, we want to make a best effort to
+ -- only make use of this where it exposes transformation
+ -- opportunities.
+ | SoftScope
+
+ -- | Cost centre scoping: We don't want any costs to move to other
+ -- cost-centre stacks. This means we not only want no code or cost
+ -- to get moved out of their cost centres, but we also object to
+ -- code getting associated with new cost-centre ticks - or
+ -- changing the order in which they get applied.
+ --
+ -- A rule of thumb is that we don't want any code to gain new
+ -- annotations. However, there are notable exceptions, for
+ -- example:
+ --
+ -- let f = \y -> foo in tick<...> ... (f x) ...
+ -- ==>
+ -- tick<...> ... foo[x/y] ...
+ --
+ -- In-lining lambdas like this is always legal, because inlining a
+ -- function does not change the cost-centre stack when the
+ -- function is called.
+ | CostCentreScope
+
+ deriving (Eq)
+
+-- | Returns the intended scoping rule for a Tickish
+tickishScoped :: Tickish id -> TickishScoping
+tickishScoped n@ProfNote{}
+ | profNoteScope n = CostCentreScope
+ | otherwise = NoScope
+tickishScoped HpcTick{} = NoScope
+tickishScoped Breakpoint{} = CostCentreScope
-- Breakpoints are scoped: eventually we're going to do call
-- stacks, but also this helps prevent the simplifier from moving
-- breakpoints around and changing their result type (see #1531).
+tickishScoped SourceNote{} = SoftScope
+tickishScoped CoreNote{} = SoftScope
+
+-- | Returns whether the tick scoping rule is at least as permissive
+-- as the given scoping rule.
+tickishScopesLike :: Tickish id -> TickishScoping -> Bool
+tickishScopesLike t scope = tickishScoped t `like` scope
+ where NoScope `like` _ = True
+ _ `like` NoScope = False
+ SoftScope `like` _ = True
+ _ `like` SoftScope = False
+ CostCentreScope `like` _ = True
+
+-- | Returns @True@ for ticks that can be floated upwards easily even
+-- where it might change execution counts, such as:
+--
+-- Just (tick<...> foo)
+-- ==>
+-- tick<...> (Just foo)
+--
+-- This is a combination of @tickishScopesLike SoftScope@ and
+-- @tickishCounts@. Note that in principle splittable ticks can become
+-- floatable using @mkNoTick@ -- even though there's currently no
+-- tickish for which that is the case.
+tickishFloatable :: Tickish id -> Bool
+tickishFloatable t = t `tickishScopesLike` SoftScope && not (tickishCounts t)
+
+-- | Returns @True@ for a tick that is both counting /and/ scoping and
+-- can be split into its (tick, scope) parts using 'mkNoScope' and
+-- 'mkNoTick' respectively.
+tickishCanSplit :: Tickish id -> Bool
+tickishCanSplit ProfNote{profNoteScope = True, profNoteCount = True}
+ = True
+tickishCanSplit _ = False
mkNoCount :: Tickish id -> Tickish id
-mkNoCount n@ProfNote{} = n {profNoteCount = False}
-mkNoCount Breakpoint{} = panic "mkNoCount: Breakpoint" -- cannot split a BP
-mkNoCount HpcTick{} = panic "mkNoCount: HpcTick"
+mkNoCount n | not (tickishCounts n) = n
+ | not (tickishCanSplit n) = panic "mkNoCount: Cannot split!"
+mkNoCount n@ProfNote{} = n {profNoteCount = False}
+mkNoCount _ = panic "mkNoCount: Undefined split!"
mkNoScope :: Tickish id -> Tickish id
-mkNoScope n@ProfNote{} = n {profNoteScope = False}
-mkNoScope Breakpoint{} = panic "mkNoScope: Breakpoint" -- cannot split a BP
-mkNoScope HpcTick{} = panic "mkNoScope: HpcTick"
-
--- | Return True if this source annotation compiles to some code, or will
--- disappear before the backend.
+mkNoScope n | tickishScoped n == NoScope = n
+ | not (tickishCanSplit n) = panic "mkNoScope: Cannot split!"
+mkNoScope n@ProfNote{} = n {profNoteScope = False}
+mkNoScope _ = panic "mkNoScope: Undefined split!"
+
+-- | Return @True@ if this source annotation compiles to some backend
+-- code. Without this flag, the tickish is seen as a simple annotation
+-- that does not have any associated evaluation code.
+--
+-- What this means is that we are allowed to disregard the tick if doing
+-- so means that we can skip generating any code in the first place. A
+-- typical example is top-level bindings:
+--
+-- foo = tick<...> \y -> ...
+-- ==>
+-- foo = \y -> tick<...> ...
+--
+-- Here there is just no operational difference between the first and
+-- the second version. Therefore code generation should simply
+-- translate the code as if it found the latter.
tickishIsCode :: Tickish id -> Bool
-tickishIsCode _tickish = True -- all of them for now
-
--- | Return True if this Tick can be split into (tick,scope) parts with
--- 'mkNoScope' and 'mkNoCount' respectively.
-tickishCanSplit :: Tickish Id -> Bool
-tickishCanSplit Breakpoint{} = False
-tickishCanSplit HpcTick{} = False
-tickishCanSplit _ = True
+tickishIsCode SourceNote{} = False
+tickishIsCode CoreNote{} = False
+tickishIsCode _tickish = True -- all the rest for now
+
+
+-- | Governs the kind of expression that the tick gets placed on when
+-- annotating for example using @mkTick@. If we find that we want to
+-- put a tickish on an expression ruled out here, we try to float it
+-- inwards until we find a suitable expression.
+data TickishPlacement =
+
+ -- | Place ticks exactly on run-time expressions. We can still
+ -- move the tick through pure compile-time constructs such as
+ -- other ticks, casts or type lambdas. This is the most
+ -- restrictive placement rule for ticks, as all tickishs have in
+ -- common that they want to track runtime processes. The only
+ -- legal placement rule for counting ticks.
+ PlaceRuntime
+
+ -- | As @PlaceRuntime@, but we float the tick through all
+ -- lambdas. This makes sense where there is little difference
+ -- between annotating the lambda and annotating the lambda's code.
+ | PlaceNonLam
+
+ -- | In addition to floating through lambdas, cost-centre style
+ -- tickishs can also be moved from constructors, non-function
+ -- variables and literals. For example:
+ --
+ -- let x = scc<...> C (scc<...> y) (scc<...> 3) in ...
+ --
+ -- Neither the constructor application, the variable or the
+ -- literal are likely to have any cost worth mentioning. And even
+ -- if y names a thunk, the call would not care about the
+ -- evaluation context. Therefore removing all annotations in the
+ -- above example is safe.
+ | PlaceCostCentre
+
+ deriving (Eq)
+
+-- | Placement behaviour we want for the ticks
+tickishPlace :: Tickish id -> TickishPlacement
+tickishPlace n@ProfNote{}
+ | profNoteCount n = PlaceRuntime
+ | otherwise = PlaceCostCentre
+tickishPlace HpcTick{} = PlaceRuntime
+tickishPlace Breakpoint{} = PlaceRuntime
+tickishPlace SourceNote{} = PlaceNonLam
+tickishPlace CoreNote{} = PlaceNonLam
+
+-- | Returns whether one tick "contains" the other one, therefore
+-- making the second tick redundant.
+tickishContains :: Tickish Id -> Tickish Id -> Bool
+tickishContains (SourceNote sp1 n1) (SourceNote sp2 n2)
+ = n1 == n2 && containsSpan sp1 sp2
+tickishContains t1 t2
+ = t1 == t2
+
+-- | Gives constructor of expr pointer or DEFAULT if not an
+-- alternative.
+exprPtrCons :: ExprPtr a -> AltCon
+exprPtrCons ExprPtr{} = DEFAULT
+exprPtrCons (AltPtr (con,_,_)) = con
+
\end{code}
@@ -1106,6 +1321,25 @@ instance Outputable b => OutputableBndr (TaggedBndr b) where
pprBndr _ b = ppr b -- Simple
pprInfixOcc b = ppr b
pprPrefixOcc b = ppr b
+
+deTagExpr :: TaggedExpr t -> CoreExpr
+deTagExpr (Var v) = Var v
+deTagExpr (Lit l) = Lit l
+deTagExpr (Type ty) = Type ty
+deTagExpr (Coercion co) = Coercion co
+deTagExpr (App e1 e2) = App (deTagExpr e1) (deTagExpr e2)
+deTagExpr (Lam (TB b _) e) = Lam b (deTagExpr e)
+deTagExpr (Let bind body) = Let (deTagBind bind) (deTagExpr body)
+deTagExpr (Case e (TB b _) ty alts) = Case (deTagExpr e) b ty (map deTagAlt alts)
+deTagExpr (Tick t e) = Tick t (deTagExpr e)
+deTagExpr (Cast e co) = Cast (deTagExpr e) co
+
+deTagBind :: TaggedBind t -> CoreBind
+deTagBind (NonRec (TB b _) rhs) = NonRec b (deTagExpr rhs)
+deTagBind (Rec prs) = Rec [(b, deTagExpr rhs) | (TB b _, rhs) <- prs]
+
+deTagAlt :: TaggedAlt t -> CoreAlt
+deTagAlt (con, bndrs, rhs) = (con, [b | TB b _ <- bndrs], deTagExpr rhs)
\end{code}
@@ -1197,8 +1431,9 @@ mkDoubleLitDouble :: Double -> Expr b
mkDoubleLit d = Lit (mkMachDouble d)
mkDoubleLitDouble d = Lit (mkMachDouble (toRational d))
--- | Bind all supplied binding groups over an expression in a nested let expression. Prefer to
--- use 'MkCore.mkCoreLets' if possible
+-- | Bind all supplied binding groups over an expression in a nested let expression. Assumes
+-- that the rhs satisfies the let/app invariant. Prefer to use 'MkCore.mkCoreLets' if
+-- possible, which does guarantee the invariant
mkLets :: [Bind b] -> Expr b -> Expr b
-- | Bind all supplied binders over an expression in a nested lambda expression. Prefer to
-- use 'MkCore.mkCoreLams' if possible
@@ -1375,8 +1610,8 @@ seqExpr (Lam b e) = seqBndr b `seq` seqExpr e
seqExpr (Let b e) = seqBind b `seq` seqExpr e
seqExpr (Case e b t as) = seqExpr e `seq` seqBndr b `seq` seqType t `seq` seqAlts as
seqExpr (Cast e co) = seqExpr e `seq` seqCo co
-seqExpr (Tick n e) = seqTickish n `seq` seqExpr e
-seqExpr (Type t) = seqType t
+seqExpr (Tick n e) = seqTickish n `seq` seqExpr e
+seqExpr (Type t) = seqType t
seqExpr (Coercion co) = seqCo co
seqExprs :: [CoreExpr] -> ()
@@ -1387,6 +1622,8 @@ seqTickish :: Tickish Id -> ()
seqTickish ProfNote{ profNoteCC = cc } = cc `seq` ()
seqTickish HpcTick{} = ()
seqTickish Breakpoint{ breakpointFVs = ids } = seqBndrs ids
+seqTickish SourceNote{} = ()
+seqTickish CoreNote{} = ()
seqBndr :: CoreBndr -> ()
seqBndr b = b `seq` ()
diff --git a/compiler/coreSyn/CoreTidy.lhs b/compiler/coreSyn/CoreTidy.lhs
index 8c0ae4a65ac7..4754aa5afbfe 100644
--- a/compiler/coreSyn/CoreTidy.lhs
+++ b/compiler/coreSyn/CoreTidy.lhs
@@ -7,7 +7,8 @@ This module contains "tidying" code for *nested* expressions, bindings, rules.
The code for *top-level* bindings is in TidyPgm.
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -33,7 +34,6 @@ import Name hiding (tidyNameOcc)
import SrcLoc
import Maybes
import Data.List
-import Outputable
\end{code}
@@ -141,18 +141,48 @@ tidyBndr env var
tidyBndrs :: TidyEnv -> [Var] -> (TidyEnv, [Var])
tidyBndrs env vars = mapAccumL tidyBndr env vars
+-- Non-top-level variables
+tidyIdBndr :: TidyEnv -> Id -> (TidyEnv, Id)
+tidyIdBndr env@(tidy_env, var_env) id
+ = -- Do this pattern match strictly, otherwise we end up holding on to
+ -- stuff in the OccName.
+ case tidyOccName tidy_env (getOccName id) of { (tidy_env', occ') ->
+ let
+ -- Give the Id a fresh print-name, *and* rename its type
+ -- The SrcLoc isn't important now,
+ -- though we could extract it from the Id
+ --
+ ty' = tidyType env (idType id)
+ name' = mkInternalName (idUnique id) occ' noSrcSpan
+ id' = mkLocalIdWithInfo name' ty' new_info
+ var_env' = extendVarEnv var_env id id'
+
+ -- Note [Tidy IdInfo]
+ new_info = vanillaIdInfo `setOccInfo` occInfo old_info
+ `setUnfoldingInfo` new_unf
+ old_info = idInfo id
+ old_unf = unfoldingInfo old_info
+ new_unf | isEvaldUnfolding old_unf = evaldUnfolding
+ | otherwise = noUnfolding
+ -- See Note [Preserve evaluatedness]
+ in
+ ((tidy_env', var_env'), id')
+ }
+
tidyLetBndr :: TidyEnv -- Knot-tied version for unfoldings
-> TidyEnv -- The one to extend
-> (Id, CoreExpr) -> (TidyEnv, Var)
-- Used for local (non-top-level) let(rec)s
-tidyLetBndr rec_tidy_env env (id,rhs)
- = ((tidy_occ_env,new_var_env), final_id)
- where
- ((tidy_occ_env,var_env), new_id) = tidyIdBndr env id
- new_var_env = extendVarEnv var_env id final_id
- -- Override the env we get back from tidyId with the
- -- new IdInfo so it gets propagated to the usage sites.
+-- Just like tidyIdBndr above, but with more IdInfo
+tidyLetBndr rec_tidy_env env@(tidy_env, var_env) (id,rhs)
+ = case tidyOccName tidy_env (getOccName id) of { (tidy_env', occ') ->
+ let
+ ty' = tidyType env (idType id)
+ name' = mkInternalName (idUnique id) occ' noSrcSpan
+ id' = mkLocalIdWithInfo name' ty' new_info
+ var_env' = extendVarEnv var_env id id'
+ -- Note [Tidy IdInfo]
-- We need to keep around any interesting strictness and
-- demand info because later on we may need to use it when
-- converting to A-normal form.
@@ -161,48 +191,27 @@ tidyLetBndr rec_tidy_env env (id,rhs)
-- into case (g x) of z -> f z by CorePrep, but only if f still
-- has its strictness info.
--
- -- Similarly for the demand info - on a let binder, this tells
+ -- Similarly for the demand info - on a let binder, this tells
-- CorePrep to turn the let into a case.
--
-- Similarly arity info for eta expansion in CorePrep
- --
- -- Set inline-prag info so that we preseve it across
+ --
+ -- Set inline-prag info so that we preserve it across
-- separate compilation boundaries
- final_id = new_id `setIdInfo` new_info
- idinfo = idInfo id
- new_info = idInfo new_id
- `setArityInfo` exprArity rhs
- `setStrictnessInfo` strictnessInfo idinfo
- `setDemandInfo` demandInfo idinfo
- `setInlinePragInfo` inlinePragInfo idinfo
- `setUnfoldingInfo` new_unf
-
- new_unf | isStableUnfolding unf = tidyUnfolding rec_tidy_env unf (panic "tidy_unf")
- | otherwise = noUnfolding
- unf = unfoldingInfo idinfo
-
--- Non-top-level variables
-tidyIdBndr :: TidyEnv -> Id -> (TidyEnv, Id)
-tidyIdBndr env@(tidy_env, var_env) id
- = -- Do this pattern match strictly, otherwise we end up holding on to
- -- stuff in the OccName.
- case tidyOccName tidy_env (getOccName id) of { (tidy_env', occ') ->
- let
- -- Give the Id a fresh print-name, *and* rename its type
- -- The SrcLoc isn't important now,
- -- though we could extract it from the Id
- --
- ty' = tidyType env (idType id)
- name' = mkInternalName (idUnique id) occ' noSrcSpan
- id' = mkLocalIdWithInfo name' ty' new_info
- var_env' = extendVarEnv var_env id id'
-
- -- Note [Tidy IdInfo]
- new_info = vanillaIdInfo `setOccInfo` occInfo old_info
old_info = idInfo id
+ new_info = vanillaIdInfo
+ `setOccInfo` occInfo old_info
+ `setArityInfo` exprArity rhs
+ `setStrictnessInfo` strictnessInfo old_info
+ `setDemandInfo` demandInfo old_info
+ `setInlinePragInfo` inlinePragInfo old_info
+ `setUnfoldingInfo` new_unf
+
+ new_unf | isStableUnfolding old_unf = tidyUnfolding rec_tidy_env old_unf old_unf
+ | otherwise = noUnfolding
+ old_unf = unfoldingInfo old_info
in
- ((tidy_env', var_env'), id')
- }
+ ((tidy_env', var_env'), id') }
------------ Unfolding --------------
tidyUnfolding :: TidyEnv -> Unfolding -> Unfolding -> Unfolding
@@ -234,9 +243,26 @@ two reasons:
the benefit of that occurrence analysis when we use the rule or
or inline the function. In particular, it's vital not to lose
loop-breaker info, else we get an infinite inlining loop
-
+
Note that tidyLetBndr puts more IdInfo back.
+Note [Preserve evaluatedness]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Consider
+ data T = MkT !Bool
+ ....(case v of MkT y ->
+ let z# = case y of
+ True -> 1#
+ False -> 2#
+ in ...)
+
+The z# binding is ok because the RHS is ok-for-speculation,
+but Lint will complain unless it can *see* that. So we
+preserve the evaluated-ness on 'y' in tidyBndr.
+
+(Another alternative would be to tidy unboxed lets into cases,
+but that seems more indirect and surprising.)
+
\begin{code}
(=:) :: a -> (a -> b) -> b
diff --git a/compiler/coreSyn/CoreUnfold.lhs b/compiler/coreSyn/CoreUnfold.lhs
index a219de8a8cc2..2d32d392f0d6 100644
--- a/compiler/coreSyn/CoreUnfold.lhs
+++ b/compiler/coreSyn/CoreUnfold.lhs
@@ -15,7 +15,8 @@ literal''). In the corner of a @CoreUnfolding@ unfolding, you will
find, unsurprisingly, a Core expression.
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -98,8 +99,11 @@ mkSimpleUnfolding :: DynFlags -> CoreExpr -> Unfolding
mkSimpleUnfolding dflags = mkUnfolding dflags InlineRhs False False
mkDFunUnfolding :: [Var] -> DataCon -> [CoreExpr] -> Unfolding
-mkDFunUnfolding bndrs con ops
- = DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = ops }
+mkDFunUnfolding bndrs con ops
+ = DFunUnfolding { df_bndrs = bndrs
+ , df_con = con
+ , df_args = map occurAnalyseExpr ops }
+ -- See Note [Occurrence analysis of unfoldings]
mkWwInlineRule :: CoreExpr -> Arity -> Unfolding
mkWwInlineRule expr arity
@@ -143,6 +147,7 @@ mkCoreUnfolding :: UnfoldingSource -> Bool -> CoreExpr
-- Occurrence-analyses the expression before capturing it
mkCoreUnfolding src top_lvl expr arity guidance
= CoreUnfolding { uf_tmpl = occurAnalyseExpr expr,
+ -- See Note [Occurrence analysis of unfoldings]
uf_src = src,
uf_arity = arity,
uf_is_top = top_lvl,
@@ -162,6 +167,7 @@ mkUnfolding dflags src top_lvl is_bottoming expr
= NoUnfolding -- See Note [Do not inline top-level bottoming functions]
| otherwise
= CoreUnfolding { uf_tmpl = occurAnalyseExpr expr,
+ -- See Note [Occurrence analysis of unfoldings]
uf_src = src,
uf_arity = arity,
uf_is_top = top_lvl,
@@ -176,6 +182,24 @@ mkUnfolding dflags src top_lvl is_bottoming expr
-- See Note [Calculate unfolding guidance on the non-occ-anal'd expression]
\end{code}
+Note [Occurrence analysis of unfoldings]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We do occurrence-analysis of unfoldings once and for all, when the
+unfolding is built, rather than each time we inline them.
+
+But given this decision it's vital that we do
+*always* do it. Consider this unfolding
+ \x -> letrec { f = ...g...; g* = f } in body
+where g* is (for some strange reason) the loop breaker. If we don't
+occ-anal it when reading it in, we won't mark g as a loop breaker, and
+we may inline g entirely in body, dropping its binding, and leaving
+the occurrence in f out of scope. This happened in Trac #8892, where
+the unfolding in question was a DFun unfolding.
+
+But more generally, the simplifier is designed on the
+basis that it is looking at occurrence-analysed expressions, so better
+ensure that they actually are.
+
Note [Calculate unfolding guidance on the non-occ-anal'd expression]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Notice that we give the non-occur-analysed expression to
@@ -234,7 +258,8 @@ calcUnfoldingGuidance
-> CoreExpr -- Expression to look at
-> (Arity, UnfoldingGuidance)
calcUnfoldingGuidance dflags expr
- = case collectBinders expr of { (bndrs, body) ->
+ = case stripTicksTop (not . tickishIsCode) expr of { (_, expr') ->
+ case collectBinders expr' of { (bndrs, body) ->
let
bOMB_OUT_SIZE = ufCreationThreshold dflags
-- Bomb out if size gets bigger than this
@@ -264,7 +289,7 @@ calcUnfoldingGuidance dflags expr
| otherwise = (+)
-- See Note [Function and non-function discounts]
in
- (n_val_bndrs, guidance) }
+ (n_val_bndrs, guidance) } }
\end{code}
Note [Computing the size of an expression]
diff --git a/compiler/coreSyn/CoreUtils.lhs b/compiler/coreSyn/CoreUtils.lhs
index ea2e17fb048a..7082bb80cadc 100644
--- a/compiler/coreSyn/CoreUtils.lhs
+++ b/compiler/coreSyn/CoreUtils.lhs
@@ -6,11 +6,13 @@
Utility functions on @Core@ syntax
\begin{code}
+{-# LANGUAGE CPP #-}
+
-- | Commonly useful utilites for manipulating the Core language
module CoreUtils (
-- * Constructing expressions
mkCast,
- mkTick, mkTickNoHNF, tickHNFArgs,
+ mkTick, mkTicks, mkTickNoHNF, tickHNFArgs,
bindNonRec, needsCaseBinding,
mkAltExpr,
@@ -31,14 +33,17 @@ module CoreUtils (
CoreStats(..), coreBindsStats,
-- * Equality
- cheapEqExpr, eqExpr,
+ cheapEqExpr, cheapEqExpr', eqExpr,
-- * Eta reduction
tryEtaReduce,
-- * Manipulating data constructors and types
applyTypeToArgs, applyTypeToArg,
- dataConRepInstPat, dataConRepFSInstPat
+ dataConRepInstPat, dataConRepFSInstPat,
+
+ -- * Working with ticks
+ stripTicksTop, stripTicks, annotateCoreNotes,
) where
#include "HsVersions.h"
@@ -69,6 +74,9 @@ import Platform
import Util
import Pair
import Data.List
+import Control.Applicative
+import Data.Traversable ( traverse )
+import OrdList
\end{code}
@@ -215,7 +223,7 @@ mkCast expr co
-- if to_ty `eqType` from_ty
-- then expr
-- else
- WARN(not (from_ty `eqType` exprType expr), text "Trying to coerce" <+> text "(" <> ppr expr $$ text "::" <+> ppr (exprType expr) <> text ")" $$ ppr co $$ pprEqPred (coercionKind co))
+ WARN(not (from_ty `eqType` exprType expr), text "Trying to coerce" <+> text "(" <> ppr expr $$ text "::" <+> ppr (exprType expr) <> text ")" $$ ppr co $$ ppr (coercionType co))
(Cast expr co)
\end{code}
@@ -223,48 +231,73 @@ mkCast expr co
-- | Wraps the given expression in the source annotation, dropping the
-- annotation if possible.
mkTick :: Tickish Id -> CoreExpr -> CoreExpr
-
-mkTick t (Var x)
- | isFunTy (idType x) = Tick t (Var x)
- | otherwise
- = if tickishCounts t
- then if tickishScoped t && tickishCanSplit t
- then Tick (mkNoScope t) (Var x)
- else Tick t (Var x)
- else Var x
-
-mkTick t (Cast e co)
- = Cast (mkTick t e) co -- Move tick inside cast
-
-mkTick _ (Coercion co) = Coercion co
-
-mkTick t (Lit l)
- | not (tickishCounts t) = Lit l
-
-mkTick t expr@(App f arg)
- | not (isRuntimeArg arg) = App (mkTick t f) arg
- | isSaturatedConApp expr
- = if not (tickishCounts t)
- then tickHNFArgs t expr
- else if tickishScoped t && tickishCanSplit t
- then Tick (mkNoScope t) (tickHNFArgs (mkNoCount t) expr)
- else Tick t expr
-
-mkTick t (Lam x e)
- -- if this is a type lambda, or the tick does not count entries,
- -- then we can push the tick inside:
- | not (isRuntimeVar x) || not (tickishCounts t) = Lam x (mkTick t e)
- -- if it is both counting and scoped, we split the tick into its
- -- two components, keep the counting tick on the outside of the lambda
- -- and push the scoped tick inside. The point of this is that the
- -- counting tick can probably be floated, and the lambda may then be
- -- in a position to be beta-reduced.
- | tickishScoped t && tickishCanSplit t
- = Tick (mkNoScope t) (Lam x (mkTick (mkNoCount t) e))
- -- just a counting tick: leave it on the outside
- | otherwise = Tick t (Lam x e)
-
-mkTick t other = Tick t other
+mkTick t orig_expr = mkTick' id id orig_expr
+ where
+ -- Some ticks (cost-centres) can be split in two, with the
+ -- non-counting part having laxer placement properties.
+ canSplit = tickishCanSplit t && tickishPlace (mkNoCount t) /= tickishPlace t
+
+ mkTick' top rest expr = case expr of
+
+ -- Walk through ticks, making sure to not introduce tick duplication
+ -- along the way. The "rest" parameter accumulates ticks we need to
+ -- put on the result expression *if* we end up
+ -- actually modifying the expression.
+ Tick t2 e | tickishContains t t2 -> mkTick' top rest e
+ | tickishContains t2 t -> orig_expr
+ | otherwise -> mkTick' top (rest . Tick t2) e
+
+ -- Ticks don't care about types, so we just float all ticks
+ -- through them. Note that it's not enough to check for these
+ -- cases top-level. While mkTick will never produce Core with type
+ -- expressions below ticks, such constructs can be the result of
+ -- unfoldings. We therefore make an effort to put everything into
+ -- the right place no matter what we start with.
+ Cast e co -> mkTick' (top . flip Cast co) rest e
+ Coercion co -> Coercion co
+
+ Lam x e
+ -- Always float through type lambdas. Even for non-type lambdas,
+ -- floating is allowed for all but the most strict placement rule.
+ | not (isRuntimeVar x) || tickishPlace t /= PlaceRuntime
+ -> mkTick' (top . Lam x) rest e
+
+ -- If it is both counting and scoped, we split the tick into its
+ -- two components, often allowing us to keep the counting tick on
+ -- the outside of the lambda and push the scoped tick inside.
+ -- The point of this is that the counting tick can probably be
+ -- floated, and the lambda may then be in a position to be
+ -- beta-reduced.
+ | canSplit
+ -> top $ Tick (mkNoScope t) $ rest $ Lam x $ mkTick (mkNoCount t) e
+
+ App f arg
+ -- Always float through type applications.
+ | not (isRuntimeArg arg)
+ -> mkTick' (top . flip App arg) rest f
+
+ -- We can also float through constructor applications, placement
+ -- permitting. Again we can split.
+ | isSaturatedConApp expr && tickishPlace t == PlaceCostCentre
+ -> top $ rest $ tickHNFArgs t expr
+ | canSplit
+ -> top $ Tick (mkNoScope t) $ rest $ tickHNFArgs (mkNoCount t) expr
+
+ Var x
+ | not (isFunTy (idType x)) && tickishPlace t == PlaceCostCentre
+ -> orig_expr
+ | canSplit
+ -> top $ Tick (mkNoScope t) $ rest expr
+
+ Lit{}
+ | tickishPlace t == PlaceCostCentre
+ -> orig_expr
+
+ -- Catch-all: Annotate where we stand
+ _any -> top $ Tick t $ rest expr
+
+mkTicks :: [Tickish Id] -> CoreExpr -> CoreExpr
+mkTicks ticks expr = foldr mkTick expr ticks
isSaturatedConApp :: CoreExpr -> Bool
isSaturatedConApp e = go e []
@@ -286,6 +319,55 @@ tickHNFArgs t e = push t e
push t (App f (Type u)) = App (push t f) (Type u)
push t (App f arg) = App (push t f) (mkTick t arg)
push _t e = e
+
+-- | Strip ticks satisfying a predicate from top of an expression
+stripTicksTop :: (Tickish Id -> Bool) -> Expr b -> ([Tickish Id], Expr b)
+stripTicksTop p = go []
+ where go ts (Tick t e) | p t = go (t:ts) e
+ go ts other = (reverse ts, other)
+
+-- | Completely strip ticks satisfying a predicate from an expression
+stripTicks :: (Tickish Id -> Bool) -> Expr b -> ([Tickish Id], Expr b)
+stripTicks p expr = (fromOL ticks, expr')
+ where (ticks, expr') = go expr
+ -- Note that OrdList (Tickish Id) is a Monoid, which makes
+ -- ((,) (OrdList (Tickish Id))) an Applicative.
+ go (App e a) = App <$> go e <*> go a
+ go (Lam b e) = Lam b <$> go e
+ go (Let b e) = Let <$> go_bs b <*> go e
+ go (Case e b t as) = Case <$> go e <*> pure b <*> pure t <*> traverse go_a as
+ go (Cast e c) = Cast <$> go e <*> pure c
+ go (Tick t e)
+ | p t = let (ts, e') = go e in (t `consOL` ts, e')
+ | otherwise = Tick t <$> go e
+ go other = pure other
+ go_bs (NonRec b e) = NonRec b <$> go e
+ go_bs (Rec bs) = Rec <$> traverse go_b bs
+ go_b (b, e) = (,) <$> pure b <*> go e
+ go_a (c,bs,e) = (,,) <$> pure c <*> pure bs <*> go e
+
+-- | Add Core ticks to a Core expression.
+annotateCoreNotes :: CoreBind -> CoreBind
+annotateCoreNotes = go_bs
+ where go (App e a) = App (go e) (go a)
+ go (Lam b e) = Lam b (go e)
+ go (Let b e) = Let (go_bs b) (go e)
+ go (Case e b t as) = Case (go e) b t (map (go_a b) as)
+ go (Cast e c) = Cast (go e) c
+ go (Tick t e) = Tick t (go e)
+ go other = other
+ go_bs (NonRec b e) = NonRec b $ tick_bind b e $ go e
+ go_bs (Rec bs) = Rec $ map go_b bs
+ go_b (b, e) = (b, tick_bind b e $ go e)
+ go_a b alt@(c,bs,e) = (c, bs, Tick (CoreNote b (AltPtr alt)) $ go e)
+ -- When ticking let bindings, we want to move the Core note
+ -- inside lambdas in order to fulfill CorePrep invariants
+ tick_bind b e (Lam b' e') = Lam b' (tick_bind b e e')
+ tick_bind b e (Tick t e') | tickishFloatable t
+ = Tick t (tick_bind b e e')
+ tick_bind b e (Cast e' c) = Cast (tick_bind b e e') c
+ tick_bind b e e' = Tick (CoreNote b (ExprPtr e)) e'
+
\end{code}
%************************************************************************
@@ -547,18 +629,20 @@ saturating them.
Note [Tick trivial]
~~~~~~~~~~~~~~~~~~~
-Ticks are not trivial. If we treat "tick x" as trivial, it will be
-inlined inside lambdas and the entry count will be skewed, for
-example. Furthermore "scc x" will turn into just "x" in mkTick.
+
+Ticks are only trivial if they are pure annotations. If we treat
+"tick x" as trivial, it will be inlined inside lambdas and the
+entry count will be skewed, for example. Furthermore "scc x" will
+turn into just "x" in mkTick.
\begin{code}
exprIsTrivial :: CoreExpr -> Bool
exprIsTrivial (Var _) = True -- See Note [Variables are trivial]
-exprIsTrivial (Type _) = True
+exprIsTrivial (Type _) = True
exprIsTrivial (Coercion _) = True
exprIsTrivial (Lit lit) = litIsTrivial lit
exprIsTrivial (App e arg) = not (isRuntimeArg arg) && exprIsTrivial e
-exprIsTrivial (Tick _ _) = False -- See Note [Tick trivial]
+exprIsTrivial (Tick t e) = not (tickishIsCode t) && exprIsTrivial e -- See Note [Tick trivial]
exprIsTrivial (Cast e _) = exprIsTrivial e
exprIsTrivial (Lam b body) = not (isRuntimeVar b) && exprIsTrivial body
exprIsTrivial _ = False
@@ -775,8 +859,8 @@ exprIsCheap' good_app (Case e _ _ alts) = exprIsCheap' good_app e &&
exprIsCheap' good_app (Tick t e)
| tickishCounts t = False
| otherwise = exprIsCheap' good_app e
- -- never duplicate ticks. If we get this wrong, then HPC's entry
- -- counts will be off (check test in libraries/hpc/tests/raytrace)
+ -- never duplicate counting ticks. If we get this wrong, then
+ -- HPC's entry counts will be off (check test in libraries/hpc/tests/raytrace)
exprIsCheap' good_app (Let (NonRec _ b) e)
= exprIsCheap' good_app b && exprIsCheap' good_app e
@@ -815,6 +899,10 @@ exprIsCheap' good_app other_expr -- Applications and variables
-- always gives bottom; we treat this as cheap
-- because it certainly doesn't need to be shared!
+ go (Tick t e) args
+ | not (tickishCounts t) -- don't duplicate counting ticks, see above
+ = go e args
+
go _ _ = False
--------------
@@ -906,13 +994,22 @@ it's applied only to dictionaries.
-- Note [exprOkForSpeculation: case expressions] below
--
-- Precisely, it returns @True@ iff:
+-- a) The expression guarantees to terminate,
+-- b) soon,
+-- c) without causing a write side effect (e.g. writing a mutable variable)
+-- d) without throwing a Haskell exception
+-- e) without risking an unchecked runtime exception (array out of bounds,
+-- divide by zero)
+--
+-- For @exprOkForSideEffects@ the list is the same, but omitting (e).
--
--- * The expression guarantees to terminate,
--- * soon,
--- * without raising an exception,
--- * without causing a side effect (e.g. writing a mutable variable)
+-- Note that
+-- exprIsHNF implies exprOkForSpeculation
+-- exprOkForSpeculation implies exprOkForSideEffects
+--
+-- See Note [PrimOp can_fail and has_side_effects] in PrimOp
+-- and Note [Implementation: how can_fail/has_side_effects affect transformations]
--
--- Note that if @exprIsHNF e@, then @exprOkForSpecuation e@.
-- As an example of the considerations in this test, consider:
--
-- > let x = case y# +# 1# of { r# -> I# r# }
@@ -954,15 +1051,16 @@ expr_ok primop_ok (Case e _ _ alts)
expr_ok primop_ok other_expr
= case collectArgs other_expr of
- (Var f, args) -> app_ok primop_ok f args
- _ -> False
+ (expr, args) | (_, Var f) <- stripTicksTop (not . tickishCounts) expr
+ -> app_ok primop_ok f args
+ _ -> False
-----------------------------
app_ok :: (PrimOp -> Bool) -> Id -> [Expr b] -> Bool
app_ok primop_ok fun args
= case idDetails fun of
DFunId _ new_type -> not new_type
- -- DFuns terminate, unless the dict is implemented
+ -- DFuns terminate, unless the dict is implemented
-- with a newtype in which case they may not
DataConWorkId {} -> True
@@ -981,14 +1079,12 @@ app_ok primop_ok fun args
-> True
| otherwise
- -> primop_ok op -- A bit conservative: we don't really need
- && all (expr_ok primop_ok) args
-
- -- to care about lazy arguments, but this is easy
+ -> primop_ok op -- A bit conservative: we don't really need
+ && all (expr_ok primop_ok) args -- to care about lazy arguments, but this is easy
_other -> isUnLiftedType (idType fun) -- c.f. the Var case of exprIsHNF
|| idArity fun > n_val_args -- Partial apps
- || (n_val_args == 0 &&
+ || (n_val_args == 0 &&
isEvaldUnfolding (idUnfolding fun)) -- Let-bound values
where
n_val_args = valArgCount args
@@ -1222,7 +1318,7 @@ dataConInstPat :: [FastString] -- A long enough list of FSs to use for
-> [Unique] -- An equally long list of uniques, at least one for each binder
-> DataCon
-> [Type] -- Types to instantiate the universally quantified tyvars
- -> ([TyVar], [Id]) -- Return instantiated variables
+ -> ([TyVar], [Id]) -- Return instantiated variables
-- dataConInstPat arg_fun fss us con inst_tys returns a triple
-- (ex_tvs, arg_ids),
--
@@ -1250,14 +1346,14 @@ dataConInstPat :: [FastString] -- A long enough list of FSs to use for
--
-- where the double-primed variables are created with the FastStrings and
-- Uniques given as fss and us
-dataConInstPat fss uniqs con inst_tys
+dataConInstPat fss uniqs con inst_tys
= ASSERT( univ_tvs `equalLength` inst_tys )
(ex_bndrs, arg_ids)
- where
+ where
univ_tvs = dataConUnivTyVars con
ex_tvs = dataConExTyVars con
arg_tys = dataConRepArgTys con
-
+ arg_strs = dataConRepStrictness con -- 1-1 with arg_tys
n_ex = length ex_tvs
-- split the Uniques and FastStrings
@@ -1268,7 +1364,7 @@ dataConInstPat fss uniqs con inst_tys
univ_subst = zipOpenTvSubst univ_tvs inst_tys
-- Make existential type variables, applyingn and extending the substitution
- (full_subst, ex_bndrs) = mapAccumL mk_ex_var univ_subst
+ (full_subst, ex_bndrs) = mapAccumL mk_ex_var univ_subst
(zip3 ex_tvs ex_fss ex_uniqs)
mk_ex_var :: TvSubst -> (TyVar, FastString, Unique) -> (TvSubst, TyVar)
@@ -1280,11 +1376,30 @@ dataConInstPat fss uniqs con inst_tys
kind = Type.substTy subst (tyVarKind tv)
-- Make value vars, instantiating types
- arg_ids = zipWith3 mk_id_var id_uniqs id_fss arg_tys
- mk_id_var uniq fs ty = mkUserLocal (mkVarOccFS fs) uniq
- (Type.substTy full_subst ty) noSrcSpan
+ arg_ids = zipWith4 mk_id_var id_uniqs id_fss arg_tys arg_strs
+ mk_id_var uniq fs ty str
+ = mkLocalIdWithInfo name (Type.substTy full_subst ty) info
+ where
+ name = mkInternalName uniq (mkVarOccFS fs) noSrcSpan
+ info | isMarkedStrict str = vanillaIdInfo `setUnfoldingInfo` evaldUnfolding
+ | otherwise = vanillaIdInfo
+ -- See Note [Mark evaluated arguments]
\end{code}
+Note [Mark evaluated arguments]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When pattern matching on a constructor with strict fields, the binder
+can have an 'evaldUnfolding'. Moreover, it *should* have one, so that
+when loading an interface file unfolding like:
+ data T = MkT !Int
+ f x = case x of { MkT y -> let v::Int# = case y of I# n -> n+1
+ in ... }
+we don't want Lint to complain. The 'y' is evaluated, so the
+case in the RHS of the binding for 'v' is fine. But only if we
+*know* that 'y' is evaluated.
+
+c.f. add_evals in Simplify.simplAlt
+
%************************************************************************
%* *
Equality
@@ -1298,19 +1413,27 @@ dataConInstPat fss uniqs con inst_tys
--
-- See also 'exprIsBig'
cheapEqExpr :: Expr b -> Expr b -> Bool
+cheapEqExpr = cheapEqExpr' (const False)
+
+-- | Same cheap equality test as @cheapEqExpr@, but also cheaply looks
+-- through given ticks while doing so.
+cheapEqExpr' :: (Tickish Id -> Bool) -> Expr b -> Expr b -> Bool
+cheapEqExpr' ignoreTick = go
+ where go (Var v1) (Var v2) = v1==v2
+ go (Lit lit1) (Lit lit2) = lit1 == lit2
+ go (Type t1) (Type t2) = t1 `eqType` t2
+ go (Coercion c1) (Coercion c2) = c1 `coreEqCoercion` c2
-cheapEqExpr (Var v1) (Var v2) = v1==v2
-cheapEqExpr (Lit lit1) (Lit lit2) = lit1 == lit2
-cheapEqExpr (Type t1) (Type t2) = t1 `eqType` t2
-cheapEqExpr (Coercion c1) (Coercion c2) = c1 `coreEqCoercion` c2
+ go (App f1 a1) (App f2 a2)
+ = f1 `cheapEqExpr` f2 && a1 `cheapEqExpr` a2
-cheapEqExpr (App f1 a1) (App f2 a2)
- = f1 `cheapEqExpr` f2 && a1 `cheapEqExpr` a2
+ go (Cast e1 t1) (Cast e2 t2)
+ = e1 `cheapEqExpr` e2 && t1 `coreEqCoercion` t2
-cheapEqExpr (Cast e1 t1) (Cast e2 t2)
- = e1 `cheapEqExpr` e2 && t1 `coreEqCoercion` t2
+ go (Tick t e1) e2 | ignoreTick t = go e1 e2
+ go e1 (Tick t e2) | ignoreTick t = go e1 e2
-cheapEqExpr _ _ = False
+ go _ _ = False
\end{code}
\begin{code}
@@ -1605,9 +1728,16 @@ tryEtaReduce bndrs body
= Just (mkCast fun co) -- Check for any of the binders free in the result
-- including the accumulated coercion
+ go bs (Tick t e) co
+ | tickishFloatable t
+ = fmap (Tick t) $ go bs e co
+ -- Float app ticks: \x -> Tick t (e x) ==> Tick t e
+
go (b : bs) (App fun arg) co
- | Just co' <- ok_arg b arg co
- = go bs fun co'
+ | let (ticks, arg') = stripTicksTop tickishFloatable arg
+ , Just co' <- ok_arg b arg' co
+ = fmap (flip (foldr mkTick) ticks) $ go bs fun co'
+ -- Float arg ticks: \x -> e (Tick t x) ==> Tick t e
go _ _ _ = Nothing -- Failure!
@@ -1652,6 +1782,7 @@ tryEtaReduce bndrs body
| bndr == v = Just (mkFunCo Representational (mkSymCo co_arg) co)
-- The simplifier combines multiple casts into one,
-- so we can have a simple-minded pattern match here
+
ok_arg _ _ _ = Nothing
\end{code}
diff --git a/compiler/coreSyn/ExternalCore.lhs b/compiler/coreSyn/ExternalCore.lhs
deleted file mode 100644
index ecc24b115583..000000000000
--- a/compiler/coreSyn/ExternalCore.lhs
+++ /dev/null
@@ -1,118 +0,0 @@
-%
-% (c) The University of Glasgow 2001-2006
-%
-\begin{code}
-module ExternalCore where
-
-import Data.Word
-
-data Module
- = Module Mname [Tdef] [Vdefg]
-
-data Tdef
- = Data (Qual Tcon) [Tbind] [Cdef]
- | Newtype (Qual Tcon) (Qual Tcon) [Tbind] Ty
-
-data Cdef
- = Constr (Qual Dcon) [Tbind] [Ty]
- | GadtConstr (Qual Dcon) Ty
-
-data Vdefg
- = Rec [Vdef]
- | Nonrec Vdef
-
--- Top-level bindings are qualified, so that the printer doesn't have to pass
--- around the module name.
-type Vdef = (Bool,Qual Var,Ty,Exp)
-
-data Exp
- = Var (Qual Var)
- | Dcon (Qual Dcon)
- | Lit Lit
- | App Exp Exp
- | Appt Exp Ty
- | Lam Bind Exp
- | Let Vdefg Exp
- | Case Exp Vbind Ty [Alt] {- non-empty list -}
- | Cast Exp Coercion
- | Tick String Exp {- XXX probably wrong -}
- | External String String Ty {- target name, convention, and type -}
- | DynExternal String Ty {- convention and type (incl. Addr# of target as first arg) -}
- | Label String
-
-data Bind
- = Vb Vbind
- | Tb Tbind
-
-data Alt
- = Acon (Qual Dcon) [Tbind] [Vbind] Exp
- | Alit Lit Exp
- | Adefault Exp
-
-type Vbind = (Var,Ty)
-type Tbind = (Tvar,Kind)
-
-data Ty
- = Tvar Tvar
- | Tcon (Qual Tcon)
- | Tapp Ty Ty
- | Tforall Tbind Ty
-
-data Coercion
--- We distinguish primitive coercions because External Core treats
--- them specially, so we have to print them out with special syntax.
- = ReflCoercion Role Ty
- | SymCoercion Coercion
- | TransCoercion Coercion Coercion
- | TyConAppCoercion Role (Qual Tcon) [Coercion]
- | AppCoercion Coercion Coercion
- | ForAllCoercion Tbind Coercion
- | CoVarCoercion Var
- | UnivCoercion Role Ty Ty
- | InstCoercion Coercion Ty
- | NthCoercion Int Coercion
- | AxiomCoercion (Qual Tcon) Int [Coercion]
- | LRCoercion LeftOrRight Coercion
- | SubCoercion Coercion
-
-data Role = Nominal | Representational | Phantom
-
-data LeftOrRight = CLeft | CRight
-
-data Kind
- = Klifted
- | Kunlifted
- | Kunboxed
- | Kopen
- | Karrow Kind Kind
-
-data Lit
- = Lint Integer Ty
- | Lrational Rational Ty
- | Lchar Char Ty
- | Lstring [Word8] Ty
-
-
-type Mname = Id
-type Var = Id
-type Tvar = Id
-type Tcon = Id
-type Dcon = Id
-
-type Qual t = (Mname,t)
-
-type Id = String
-
-primMname :: Mname
--- For truly horrible reasons, this must be z-encoded.
--- With any hope, the z-encoding will die soon.
-primMname = "ghczmprim:GHCziPrim"
-
-tcArrow :: Qual Tcon
-tcArrow = (primMname, "(->)")
-
-\end{code}
-
-
-
-
diff --git a/compiler/coreSyn/MkCore.lhs b/compiler/coreSyn/MkCore.lhs
index f71b4b4ff651..3ba8b1d6eee6 100644
--- a/compiler/coreSyn/MkCore.lhs
+++ b/compiler/coreSyn/MkCore.lhs
@@ -1,5 +1,6 @@
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -303,9 +304,9 @@ mkStringExprFS str
mkEqBox :: Coercion -> CoreExpr
mkEqBox co = ASSERT2( typeKind ty2 `eqKind` k, ppr co $$ ppr ty1 $$ ppr ty2 $$ ppr (typeKind ty1) $$ ppr (typeKind ty2) )
Var (dataConWorkId datacon) `mkTyApps` [k, ty1, ty2] `App` Coercion co
- where Pair ty1 ty2 = coercionKind co
+ where (Pair ty1 ty2, role) = coercionKindRole co
k = typeKind ty1
- datacon = case coercionRole co of
+ datacon = case role of
Nominal -> eqBoxDataCon
Representational -> coercibleDataCon
Phantom -> pprPanic "mkEqBox does not support boxing phantom coercions"
@@ -414,12 +415,17 @@ mkBigCoreTupTy = mkChunkified mkBoxedTupleTy
%************************************************************************
\begin{code}
-data FloatBind
+data FloatBind
= FloatLet CoreBind
- | FloatCase CoreExpr Id AltCon [Var]
+ | FloatCase CoreExpr Id AltCon [Var]
-- case e of y { C ys -> ... }
-- See Note [Floating cases] in SetLevels
+instance Outputable FloatBind where
+ ppr (FloatLet b) = ptext (sLit "LET") <+> ppr b
+ ppr (FloatCase e b c bs) = hang (ptext (sLit "CASE") <+> ppr e <+> ptext (sLit "of") <+> ppr b)
+ 2 (ppr c <+> ppr bs)
+
wrapFloat :: FloatBind -> CoreExpr -> CoreExpr
wrapFloat (FloatLet defns) body = Let defns body
wrapFloat (FloatCase e b con bs) body = Case e b (exprType body) [(con, bs, body)]
diff --git a/compiler/coreSyn/MkExternalCore.lhs b/compiler/coreSyn/MkExternalCore.lhs
deleted file mode 100644
index 6a6f0551ed51..000000000000
--- a/compiler/coreSyn/MkExternalCore.lhs
+++ /dev/null
@@ -1,360 +0,0 @@
-
-% (c) The University of Glasgow 2001-2006
-%
-\begin{code}
-module MkExternalCore (
- emitExternalCore
-) where
-
-#include "HsVersions.h"
-
-import qualified ExternalCore as C
-import Module
-import CoreSyn
-import HscTypes
-import TyCon
-import CoAxiom
--- import Class
-import TypeRep
-import Type
-import Kind
-import PprExternalCore () -- Instances
-import DataCon
-import Coercion
-import Var
-import IdInfo
-import Literal
-import Name
-import Outputable
-import Encoding
-import ForeignCall
-import DynFlags
-import FastString
-import Exception
-
-import Control.Applicative (Applicative(..))
-import Control.Monad
-import qualified Data.ByteString as BS
-import Data.Char
-import System.IO
-
-emitExternalCore :: DynFlags -> FilePath -> CgGuts -> IO ()
-emitExternalCore dflags extCore_filename cg_guts
- | gopt Opt_EmitExternalCore dflags
- = (do handle <- openFile extCore_filename WriteMode
- hPutStrLn handle (show (mkExternalCore dflags cg_guts))
- hClose handle)
- `catchIO` (\_ -> pprPanic "Failed to open or write external core output file"
- (text extCore_filename))
-emitExternalCore _ _ _
- | otherwise
- = return ()
-
--- Reinventing the Reader monad; whee.
-newtype CoreM a = CoreM (CoreState -> (CoreState, a))
-data CoreState = CoreState {
- cs_dflags :: DynFlags,
- cs_module :: Module
- }
-
-instance Functor CoreM where
- fmap = liftM
-
-instance Applicative CoreM where
- pure = return
- (<*>) = ap
-
-instance Monad CoreM where
- (CoreM m) >>= f = CoreM (\ s -> case m s of
- (s',r) -> case f r of
- CoreM f' -> f' s')
- return x = CoreM (\ s -> (s, x))
-runCoreM :: CoreM a -> CoreState -> a
-runCoreM (CoreM f) s = snd $ f s
-ask :: CoreM CoreState
-ask = CoreM (\ s -> (s,s))
-
-instance HasDynFlags CoreM where
- getDynFlags = liftM cs_dflags ask
-
-mkExternalCore :: DynFlags -> CgGuts -> C.Module
--- The ModGuts has been tidied, but the implicit bindings have
--- not been injected, so we have to add them manually here
--- We don't include the strange data-con *workers* because they are
--- implicit in the data type declaration itself
-mkExternalCore dflags (CgGuts {cg_module=this_mod, cg_tycons = tycons,
- cg_binds = binds})
-{- Note that modules can be mutually recursive, but even so, we
- print out dependency information within each module. -}
- = C.Module (mname dflags) tdefs (runCoreM (mapM (make_vdef True) binds) initialState)
- where
- initialState = CoreState {
- cs_dflags = dflags,
- cs_module = this_mod
- }
- mname dflags = make_mid dflags this_mod
- tdefs = foldr (collect_tdefs dflags) [] tycons
-
-collect_tdefs :: DynFlags -> TyCon -> [C.Tdef] -> [C.Tdef]
-collect_tdefs dflags tcon tdefs
- | isAlgTyCon tcon = tdef: tdefs
- where
- tdef | isNewTyCon tcon =
- C.Newtype (qtc dflags tcon)
- (qcc dflags (newTyConCo tcon))
- (map make_tbind tyvars)
- (make_ty dflags (snd (newTyConRhs tcon)))
- | otherwise =
- C.Data (qtc dflags tcon) (map make_tbind tyvars)
- (map (make_cdef dflags) (tyConDataCons tcon))
- tyvars = tyConTyVars tcon
-
-collect_tdefs _ _ tdefs = tdefs
-
-qtc :: DynFlags -> TyCon -> C.Qual C.Tcon
-qtc dflags = make_con_qid dflags . tyConName
-
-qcc :: DynFlags -> CoAxiom br -> C.Qual C.Tcon
-qcc dflags = make_con_qid dflags . co_ax_name
-
-make_cdef :: DynFlags -> DataCon -> C.Cdef
-make_cdef dflags dcon = C.Constr dcon_name existentials tys
- where
- dcon_name = make_qid dflags False False (dataConName dcon)
- existentials = map make_tbind ex_tyvars
- ex_tyvars = dataConExTyVars dcon
- tys = map (make_ty dflags) (dataConRepArgTys dcon)
-
-make_tbind :: TyVar -> C.Tbind
-make_tbind tv = (make_var_id (tyVarName tv), make_kind (tyVarKind tv))
-
-make_vbind :: DynFlags -> Var -> C.Vbind
-make_vbind dflags v = (make_var_id (Var.varName v), make_ty dflags (varType v))
-
-make_vdef :: Bool -> CoreBind -> CoreM C.Vdefg
-make_vdef topLevel b =
- case b of
- NonRec v e -> f (v,e) >>= (return . C.Nonrec)
- Rec ves -> mapM f ves >>= (return . C.Rec)
- where
- f :: (CoreBndr,CoreExpr) -> CoreM C.Vdef
- f (v,e) = do
- localN <- isALocal vName
- let local = not topLevel || localN
- rhs <- make_exp e
- -- use local flag to determine where to add the module name
- dflags <- getDynFlags
- return (local, make_qid dflags local True vName, make_ty dflags (varType v),rhs)
- where vName = Var.varName v
-
-make_exp :: CoreExpr -> CoreM C.Exp
-make_exp (Var v) = do
- let vName = Var.varName v
- isLocal <- isALocal vName
- dflags <- getDynFlags
- return $
- case idDetails v of
- FCallId (CCall (CCallSpec (StaticTarget nm _ True) callconv _))
- -> C.External (unpackFS nm) (showPpr dflags callconv) (make_ty dflags (varType v))
- FCallId (CCall (CCallSpec (StaticTarget _ _ False) _ _)) ->
- panic "make_exp: FFI values not supported"
- FCallId (CCall (CCallSpec DynamicTarget callconv _))
- -> C.DynExternal (showPpr dflags callconv) (make_ty dflags (varType v))
- -- Constructors are always exported, so make sure to declare them
- -- with qualified names
- DataConWorkId _ -> C.Var (make_var_qid dflags False vName)
- DataConWrapId _ -> C.Var (make_var_qid dflags False vName)
- _ -> C.Var (make_var_qid dflags isLocal vName)
-make_exp (Lit (MachLabel s _ _)) = return $ C.Label (unpackFS s)
-make_exp (Lit l) = do dflags <- getDynFlags
- return $ C.Lit (make_lit dflags l)
-make_exp (App e (Type t)) = do b <- make_exp e
- dflags <- getDynFlags
- return $ C.Appt b (make_ty dflags t)
-make_exp (App _e (Coercion _co)) = error "make_exp (App _ (Coercion _))" -- TODO
-make_exp (App e1 e2) = do
- rator <- make_exp e1
- rand <- make_exp e2
- return $ C.App rator rand
-make_exp (Lam v e) | isTyVar v = make_exp e >>= (\ b ->
- return $ C.Lam (C.Tb (make_tbind v)) b)
-make_exp (Lam v e) | otherwise = do b <- make_exp e
- dflags <- getDynFlags
- return $ C.Lam (C.Vb (make_vbind dflags v)) b
-make_exp (Cast e co) = do b <- make_exp e
- dflags <- getDynFlags
- return $ C.Cast b (make_co dflags co)
-make_exp (Let b e) = do
- vd <- make_vdef False b
- body <- make_exp e
- return $ C.Let vd body
-make_exp (Case e v ty alts) = do
- scrut <- make_exp e
- newAlts <- mapM make_alt alts
- dflags <- getDynFlags
- return $ C.Case scrut (make_vbind dflags v) (make_ty dflags ty) newAlts
-make_exp (Tick _ e) = make_exp e >>= (return . C.Tick "SCC") -- temporary
-make_exp _ = error "MkExternalCore died: make_exp"
-
-make_alt :: CoreAlt -> CoreM C.Alt
-make_alt (DataAlt dcon, vs, e) = do
- newE <- make_exp e
- dflags <- getDynFlags
- return $ C.Acon (make_con_qid dflags (dataConName dcon))
- (map make_tbind tbs)
- (map (make_vbind dflags) vbs)
- newE
- where (tbs,vbs) = span isTyVar vs
-make_alt (LitAlt l,_,e) = do x <- make_exp e
- dflags <- getDynFlags
- return $ C.Alit (make_lit dflags l) x
-make_alt (DEFAULT,[],e) = make_exp e >>= (return . C.Adefault)
--- This should never happen, as the DEFAULT alternative binds no variables,
--- but we might as well check for it:
-make_alt a@(DEFAULT,_ ,_) = pprPanic ("MkExternalCore: make_alt: DEFAULT "
- ++ "alternative had a non-empty var list") (ppr a)
-
-
-make_lit :: DynFlags -> Literal -> C.Lit
-make_lit dflags l =
- case l of
- -- Note that we need to check whether the character is "big".
- -- External Core only allows character literals up to '\xff'.
- MachChar i | i <= chr 0xff -> C.Lchar i t
- -- For a character bigger than 0xff, we represent it in ext-core
- -- as an int lit with a char type.
- MachChar i -> C.Lint (fromIntegral $ ord i) t
- MachStr s -> C.Lstring (BS.unpack s) t
- MachNullAddr -> C.Lint 0 t
- MachInt i -> C.Lint i t
- MachInt64 i -> C.Lint i t
- MachWord i -> C.Lint i t
- MachWord64 i -> C.Lint i t
- MachFloat r -> C.Lrational r t
- MachDouble r -> C.Lrational r t
- LitInteger i _ -> C.Lint i t
- _ -> pprPanic "MkExternalCore died: make_lit" (ppr l)
- where
- t = make_ty dflags (literalType l)
-
--- Expand type synonyms, then convert.
-make_ty :: DynFlags -> Type -> C.Ty -- Be sure to expand types recursively!
- -- example: FilePath ~> String ~> [Char]
-make_ty dflags t | Just expanded <- tcView t = make_ty dflags expanded
-make_ty dflags t = make_ty' dflags t
-
--- note calls to make_ty so as to expand types recursively
-make_ty' :: DynFlags -> Type -> C.Ty
-make_ty' _ (TyVarTy tv) = C.Tvar (make_var_id (tyVarName tv))
-make_ty' dflags (AppTy t1 t2) = C.Tapp (make_ty dflags t1) (make_ty dflags t2)
-make_ty' dflags (FunTy t1 t2) = make_ty dflags (TyConApp funTyCon [t1,t2])
-make_ty' dflags (ForAllTy tv t) = C.Tforall (make_tbind tv) (make_ty dflags t)
-make_ty' dflags (TyConApp tc ts) = make_tyConApp dflags tc ts
-make_ty' _ (LitTy {}) = panic "MkExernalCore can't do literal types yet"
-
--- Newtypes are treated just like any other type constructor; not expanded
--- Reason: predTypeRep does substitution and, while substitution deals
--- correctly with name capture, it's only correct if you see the uniques!
--- If you just see occurrence names, name capture may occur.
--- Example: newtype A a = A (forall b. b -> a)
--- test :: forall q b. q -> A b
--- test _ = undefined
--- Here the 'a' gets substituted by 'b', which is captured.
--- Another solution would be to expand newtypes before tidying; but that would
--- expose the representation in interface files, which definitely isn't right.
--- Maybe CoreTidy should know whether to expand newtypes or not?
-
-make_tyConApp :: DynFlags -> TyCon -> [Type] -> C.Ty
-make_tyConApp dflags tc ts =
- foldl C.Tapp (C.Tcon (qtc dflags tc))
- (map (make_ty dflags) ts)
-
-make_kind :: Kind -> C.Kind
-make_kind (FunTy k1 k2) = C.Karrow (make_kind k1) (make_kind k2)
-make_kind k
- | isLiftedTypeKind k = C.Klifted
- | isUnliftedTypeKind k = C.Kunlifted
- | isOpenTypeKind k = C.Kopen
-make_kind _ = error "MkExternalCore died: make_kind"
-
-{- Id generation. -}
-
-make_id :: Bool -> Name -> C.Id
--- include uniques for internal names in order to avoid name shadowing
-make_id _is_var nm = ((occNameString . nameOccName) nm)
- ++ (if isInternalName nm then (show . nameUnique) nm else "")
-
-make_var_id :: Name -> C.Id
-make_var_id = make_id True
-
--- It's important to encode the module name here, because in External Core,
--- base:GHC.Base => base:GHCziBase
--- We don't do this in pprExternalCore because we
--- *do* want to keep the package name (we don't want baseZCGHCziBase,
--- because that would just be ugly.)
--- SIGH.
--- We encode the package name as well.
-make_mid :: DynFlags -> Module -> C.Id
--- Super ugly code, but I can't find anything else that does quite what I
--- want (encodes the hierarchical module name without encoding the colon
--- that separates the package name from it.)
-make_mid dflags m
- = showSDoc dflags $
- (text $ zEncodeString $ packageIdString $ modulePackageId m)
- <> text ":"
- <> (pprEncoded $ pprModuleName $ moduleName m)
- where pprEncoded = pprCode CStyle
-
-make_qid :: DynFlags -> Bool -> Bool -> Name -> C.Qual C.Id
-make_qid dflags force_unqual is_var n = (mname,make_id is_var n)
- where mname =
- case nameModule_maybe n of
- Just m | not force_unqual -> make_mid dflags m
- _ -> ""
-
-make_var_qid :: DynFlags -> Bool -> Name -> C.Qual C.Id
-make_var_qid dflags force_unqual = make_qid dflags force_unqual True
-
-make_con_qid :: DynFlags -> Name -> C.Qual C.Id
-make_con_qid dflags = make_qid dflags False False
-
-make_co :: DynFlags -> Coercion -> C.Coercion
-make_co dflags (Refl r ty) = C.ReflCoercion (make_role r) $ make_ty dflags ty
-make_co dflags (TyConAppCo r tc cos) = C.TyConAppCoercion (make_role r) (qtc dflags tc) (map (make_co dflags) cos)
-make_co dflags (AppCo c1 c2) = C.AppCoercion (make_co dflags c1) (make_co dflags c2)
-make_co dflags (ForAllCo tv co) = C.ForAllCoercion (make_tbind tv) (make_co dflags co)
-make_co _ (CoVarCo cv) = C.CoVarCoercion (make_var_id (coVarName cv))
-make_co dflags (AxiomInstCo cc ind cos) = C.AxiomCoercion (qcc dflags cc) ind (map (make_co dflags) cos)
-make_co dflags (UnivCo r t1 t2) = C.UnivCoercion (make_role r) (make_ty dflags t1) (make_ty dflags t2)
-make_co dflags (SymCo co) = C.SymCoercion (make_co dflags co)
-make_co dflags (TransCo c1 c2) = C.TransCoercion (make_co dflags c1) (make_co dflags c2)
-make_co dflags (NthCo d co) = C.NthCoercion d (make_co dflags co)
-make_co dflags (LRCo lr co) = C.LRCoercion (make_lr lr) (make_co dflags co)
-make_co dflags (InstCo co ty) = C.InstCoercion (make_co dflags co) (make_ty dflags ty)
-make_co dflags (SubCo co) = C.SubCoercion (make_co dflags co)
-make_co _ (AxiomRuleCo {}) = panic "make_co AxiomRuleCo: not yet implemented"
-
-
-make_lr :: LeftOrRight -> C.LeftOrRight
-make_lr CLeft = C.CLeft
-make_lr CRight = C.CRight
-
-make_role :: Role -> C.Role
-make_role Nominal = C.Nominal
-make_role Representational = C.Representational
-make_role Phantom = C.Phantom
-
--------
-isALocal :: Name -> CoreM Bool
-isALocal vName = do
- modName <- liftM cs_module ask
- return $ case nameModule_maybe vName of
- -- Not sure whether isInternalName corresponds to "local"ness
- -- in the External Core sense; need to re-read the spec.
- Just m | m == modName -> isInternalName vName
- _ -> False
-\end{code}
-
-
-
-
diff --git a/compiler/coreSyn/PprCore.lhs b/compiler/coreSyn/PprCore.lhs
index d7990085fe74..44ad90862360 100644
--- a/compiler/coreSyn/PprCore.lhs
+++ b/compiler/coreSyn/PprCore.lhs
@@ -29,6 +29,7 @@ import BasicTypes
import Util
import Outputable
import FastString
+import SrcLoc ( showUserRealSpan )
\end{code}
%************************************************************************
@@ -91,7 +92,14 @@ ppr_bind (Rec binds) = vcat (map pp binds)
ppr_binding :: OutputableBndr b => (b, Expr b) -> SDoc
ppr_binding (val_bdr, expr)
= pprBndr LetBind val_bdr $$
- hang (ppr val_bdr <+> equals) 2 (pprCoreExpr expr)
+ hang (ppr val_bdr <+> equals) 2 (ppr_annot val_bdr $ pprCoreExpr expr)
+
+ppr_annot :: Outputable b => b -> SDoc -> SDoc
+ppr_annot name doc = sdocWithDynFlags $ \dflags ->
+ let annot = showSDocDump dflags $ text "ann<#" <> ppr name <> text "#>"
+ in pprAnnotate annot doc
+
+
\end{code}
\begin{code}
@@ -121,7 +129,7 @@ ppr_expr add_par (Cast expr co)
if gopt Opt_SuppressCoercions dflags
then ptext (sLit "...")
else parens $
- sep [ppr co, dcolon <+> pprEqPred (coercionKind co)]
+ sep [ppr co, dcolon <+> ppr (coercionType co)]
ppr_expr add_par expr@(Lam _ _)
@@ -164,7 +172,8 @@ ppr_expr add_par (Case expr var ty [(con,args,rhs)])
<+> ppr_bndr var
, ptext (sLit "<-") <+> ppr_expr id expr
<+> ptext (sLit "} in") ]
- , pprCoreExpr rhs
+ , ppr_annot (var, con) $
+ pprCoreExpr rhs
]
else add_par $
sep [sep [ptext (sLit "case") <+> pprCoreExpr expr,
@@ -172,6 +181,7 @@ ppr_expr add_par (Case expr var ty [(con,args,rhs)])
sep [ptext (sLit "of") <+> ppr_bndr var,
char '{' <+> ppr_case_pat con args <+> arrow]
],
+ ppr_annot (var, con) $
pprCoreExpr rhs,
char '}'
]
@@ -184,11 +194,12 @@ ppr_expr add_par (Case expr var ty alts)
<+> pprCoreExpr expr
<+> ifPprDebug (braces (ppr ty)),
ptext (sLit "of") <+> ppr_bndr var <+> char '{'],
- nest 2 (vcat (punctuate semi (map pprCoreAlt alts))),
+ nest 2 (vcat (punctuate semi (map ppr_alt alts))),
char '}'
]
where
ppr_bndr = pprBndr CaseBind
+ ppr_alt alt@(con,_,_) = ppr_annot (var, con) $ pprCoreAlt alt
-- special cases: let ... in let ...
@@ -223,7 +234,10 @@ ppr_expr add_par (Let bind expr)
NonRec _ _ -> (sLit "let {")
ppr_expr add_par (Tick tickish expr)
- = add_par (sep [ppr tickish, pprCoreExpr expr])
+ = sdocWithDynFlags $ \dflags ->
+ if gopt Opt_PprShowTicks dflags
+ then add_par (sep [ppr tickish, pprCoreExpr expr])
+ else ppr_expr add_par expr
pprCoreAlt :: OutputableBndr a => (AltCon, [a] , Expr a) -> SDoc
pprCoreAlt (con, args, rhs)
@@ -376,10 +390,11 @@ ppIdInfo id info
else
showAttributes
[ (True, pp_scope <> ppr (idDetails id))
- , (has_arity, ptext (sLit "Arity=") <> int arity)
- , (has_caf_info, ptext (sLit "Caf=") <> ppr caf_info)
- , (True, ptext (sLit "Str=") <> pprStrictness str_info)
- , (has_unf, ptext (sLit "Unf=") <> ppr unf_info)
+ , (has_arity, ptext (sLit "Arity=") <> int arity)
+ , (has_called_arity, ptext (sLit "CallArity=") <> int called_arity)
+ , (has_caf_info, ptext (sLit "Caf=") <> ppr caf_info)
+ , (True, ptext (sLit "Str=") <> pprStrictness str_info)
+ , (has_unf, ptext (sLit "Unf=") <> ppr unf_info)
, (not (null rules), ptext (sLit "RULES:") <+> vcat (map pprRule rules))
] -- Inline pragma, occ, demand, one-shot info
-- printed out with all binders (when debug is on);
@@ -392,6 +407,9 @@ ppIdInfo id info
arity = arityInfo info
has_arity = arity /= 0
+ called_arity = callArityInfo info
+ has_called_arity = called_arity /= 0
+
caf_info = cafInfo info
has_caf_info = not (mayHaveCafRefs caf_info)
@@ -510,6 +528,12 @@ instance Outputable id => Outputable (Tickish id) where
(True,True) -> hcat [ptext (sLit "scctick<"), ppr cc, char '>']
(True,False) -> hcat [ptext (sLit "tick<"), ppr cc, char '>']
_ -> hcat [ptext (sLit "scc<"), ppr cc, char '>']
+ ppr (SourceNote span _) =
+ hcat [ ptext (sLit "src<"), text (showUserRealSpan True span), char '>']
+ ppr (CoreNote {coreBind = bnd, coreNote = ExprPtr{}}) =
+ hcat [ ptext (sLit "core<"), ppr bnd, ptext (sLit "=...>") ]
+ ppr (CoreNote {coreBind = bnd, coreNote = AltPtr (con,_,_)}) =
+ hcat [ ptext (sLit "core<"), ppr bnd <+> ppr con <>ptext (sLit "->...>") ]
\end{code}
-----------------------------------------------------
diff --git a/compiler/coreSyn/PprExternalCore.lhs b/compiler/coreSyn/PprExternalCore.lhs
deleted file mode 100644
index 7fd3ac1d6531..000000000000
--- a/compiler/coreSyn/PprExternalCore.lhs
+++ /dev/null
@@ -1,260 +0,0 @@
-%
-% (c) The University of Glasgow 2001-2006
-%
-
-\begin{code}
-{-# OPTIONS_GHC -fno-warn-orphans #-}
-
-module PprExternalCore () where
-
-import Encoding
-import ExternalCore
-
-import Pretty
-import Data.Char
-import Data.Ratio
-
-instance Show Module where
- showsPrec _ m = shows (pmodule m)
-
-instance Show Tdef where
- showsPrec _ t = shows (ptdef t)
-
-instance Show Cdef where
- showsPrec _ c = shows (pcdef c)
-
-instance Show Vdefg where
- showsPrec _ v = shows (pvdefg v)
-
-instance Show Exp where
- showsPrec _ e = shows (pexp e)
-
-instance Show Alt where
- showsPrec _ a = shows (palt a)
-
-instance Show Ty where
- showsPrec _ t = shows (pty t)
-
-instance Show Kind where
- showsPrec _ k = shows (pkind k)
-
-instance Show Lit where
- showsPrec _ l = shows (plit l)
-
-
-indent :: Doc -> Doc
-indent = nest 2
-
-pmodule :: Module -> Doc
-pmodule (Module mname tdefs vdefgs) =
- (text "%module" <+> text mname)
- $$ indent ((vcat (map ((<> char ';') . ptdef) tdefs))
- $$ (vcat (map ((<> char ';') . pvdefg) vdefgs)))
-
-ptdef :: Tdef -> Doc
-ptdef (Data tcon tbinds cdefs) =
- (text "%data" <+> pqname tcon <+> (hsep (map ptbind tbinds)) <+> char '=')
- $$ indent (braces ((vcat (punctuate (char ';') (map pcdef cdefs)))))
-
-ptdef (Newtype tcon coercion tbinds rep) =
- text "%newtype" <+> pqname tcon <+> pqname coercion
- <+> (hsep (map ptbind tbinds)) $$ indent repclause
- where repclause = char '=' <+> pty rep
-
-pcdef :: Cdef -> Doc
-pcdef (Constr dcon tbinds tys) =
- (pqname dcon) <+> (sep [hsep (map pattbind tbinds),sep (map paty tys)])
-pcdef (GadtConstr dcon ty) =
- (pqname dcon) <+> text "::" <+> pty ty
-
-pname :: Id -> Doc
-pname id = text (zEncodeString id)
-
-pqname :: Qual Id -> Doc
-pqname ("",id) = pname id
-pqname (m,id) = text m <> char '.' <> pname id
-
-ptbind, pattbind :: Tbind -> Doc
-ptbind (t,Klifted) = pname t
-ptbind (t,k) = parens (pname t <> text "::" <> pkind k)
-
-pattbind (t,k) = char '@' <> ptbind (t,k)
-
-pakind, pkind :: Kind -> Doc
-pakind (Klifted) = char '*'
-pakind (Kunlifted) = char '#'
-pakind (Kopen) = char '?'
-pakind k = parens (pkind k)
-
-pkind (Karrow k1 k2) = parens (pakind k1 <> text "->" <> pkind k2)
-pkind k = pakind k
-
-paty, pbty, pty :: Ty -> Doc
--- paty: print in parens, if non-atomic (like a name)
--- pbty: print in parens, if arrow (used only for lhs of arrow)
--- pty: not in parens
-paty (Tvar n) = pname n
-paty (Tcon c) = pqname c
-paty t = parens (pty t)
-
-pbty (Tapp(Tapp(Tcon tc) t1) t2) | tc == tcArrow = parens(fsep [pbty t1, text "->",pty t2])
-pbty t = paty t
-
-pty (Tapp(Tapp(Tcon tc) t1) t2) | tc == tcArrow = fsep [pbty t1, text "->",pty t2]
-pty (Tforall tb t) = text "%forall" <+> pforall [tb] t
-pty ty@(Tapp {}) = pappty ty []
-pty ty@(Tvar {}) = paty ty
-pty ty@(Tcon {}) = paty ty
-
-pappty :: Ty -> [Ty] -> Doc
-pappty (Tapp t1 t2) ts = pappty t1 (t2:ts)
-pappty t ts = sep (map paty (t:ts))
-
-pforall :: [Tbind] -> Ty -> Doc
-pforall tbs (Tforall tb t) = pforall (tbs ++ [tb]) t
-pforall tbs t = hsep (map ptbind tbs) <+> char '.' <+> pty t
-
-paco, pbco, pco :: Coercion -> Doc
-paco (ReflCoercion r ty) = char '<' <> pty ty <> text ">_" <> prole r
-paco (TyConAppCoercion r qtc []) = pqname qtc <> char '_' <> prole r
-paco (AxiomCoercion qtc i []) = pqname qtc <> char '[' <> int i <> char ']'
-paco (CoVarCoercion cv) = pname cv
-paco c = parens (pco c)
-
-pbco (TyConAppCoercion _ arr [co1, co2])
- | arr == tcArrow
- = parens (fsep [pbco co1, text "->", pco co2])
-pbco co = paco co
-
-pco c@(ReflCoercion {}) = paco c
-pco (SymCoercion co) = sep [text "%sub", paco co]
-pco (TransCoercion co1 co2) = sep [text "%trans", paco co1, paco co2]
-pco (TyConAppCoercion _ arr [co1, co2])
- | arr == tcArrow = fsep [pbco co1, text "->", pco co2]
-pco (TyConAppCoercion r qtc cos) = parens (pqname qtc <+> sep (map paco cos)) <> char '_' <> prole r
-pco co@(AppCoercion {}) = pappco co []
-pco (ForAllCoercion tb co) = text "%forall" <+> pforallco [tb] co
-pco co@(CoVarCoercion {}) = paco co
-pco (UnivCoercion r ty1 ty2) = sep [text "%univ", prole r, paty ty1, paty ty2]
-pco (InstCoercion co ty) = sep [text "%inst", paco co, paty ty]
-pco (NthCoercion i co) = sep [text "%nth", int i, paco co]
-pco (AxiomCoercion qtc i cos) = pqname qtc <> char '[' <> int i <> char ']' <+> sep (map paco cos)
-pco (LRCoercion CLeft co) = sep [text "%left", paco co]
-pco (LRCoercion CRight co) = sep [text "%right", paco co]
-pco (SubCoercion co) = sep [text "%sub", paco co]
-
-pappco :: Coercion -> [Coercion ] -> Doc
-pappco (AppCoercion co1 co2) cos = pappco co1 (co2:cos)
-pappco co cos = sep (map paco (co:cos))
-
-pforallco :: [Tbind] -> Coercion -> Doc
-pforallco tbs (ForAllCoercion tb co) = pforallco (tbs ++ [tb]) co
-pforallco tbs co = hsep (map ptbind tbs) <+> char '.' <+> pco co
-
-prole :: Role -> Doc
-prole Nominal = char 'N'
-prole Representational = char 'R'
-prole Phantom = char 'P'
-
-pvdefg :: Vdefg -> Doc
-pvdefg (Rec vdefs) = text "%rec" $$ braces (indent (vcat (punctuate (char ';') (map pvdef vdefs))))
-pvdefg (Nonrec vdef) = pvdef vdef
-
-pvdef :: Vdef -> Doc
--- TODO: Think about whether %local annotations are actually needed.
--- Right now, the local flag is never used, because the Core doc doesn't
--- explain the meaning of %local.
-pvdef (_l,v,t,e) = sep [(pqname v <+> text "::" <+> pty t <+> char '='),
- indent (pexp e)]
-
-paexp, pfexp, pexp :: Exp -> Doc
-paexp (Var x) = pqname x
-paexp (Dcon x) = pqname x
-paexp (Lit l) = plit l
-paexp e = parens(pexp e)
-
-plamexp :: [Bind] -> Exp -> Doc
-plamexp bs (Lam b e) = plamexp (bs ++ [b]) e
-plamexp bs e = sep [sep (map pbind bs) <+> text "->",
- indent (pexp e)]
-
-pbind :: Bind -> Doc
-pbind (Tb tb) = char '@' <+> ptbind tb
-pbind (Vb vb) = pvbind vb
-
-pfexp (App e1 e2) = pappexp e1 [Left e2]
-pfexp (Appt e t) = pappexp e [Right t]
-pfexp e = paexp e
-
-pappexp :: Exp -> [Either Exp Ty] -> Doc
-pappexp (App e1 e2) as = pappexp e1 (Left e2:as)
-pappexp (Appt e t) as = pappexp e (Right t:as)
-pappexp e as = fsep (paexp e : map pa as)
- where pa (Left e) = paexp e
- pa (Right t) = char '@' <+> paty t
-
-pexp (Lam b e) = char '\\' <+> plamexp [b] e
-pexp (Let vd e) = (text "%let" <+> pvdefg vd) $$ (text "%in" <+> pexp e)
-pexp (Case e vb ty alts) = sep [text "%case" <+> paty ty <+> paexp e,
- text "%of" <+> pvbind vb]
- $$ (indent (braces (vcat (punctuate (char ';') (map palt alts)))))
-pexp (Cast e co) = (text "%cast" <+> parens (pexp e)) $$ paco co
-pexp (Tick s e) = (text "%source" <+> pstring s) $$ pexp e
-pexp (External n cc t) = (text "%external" <+> text cc <+> pstring n) $$ paty t
-pexp (DynExternal cc t) = (text "%dynexternal" <+> text cc) $$ paty t
-pexp (Label n) = (text "%label" <+> pstring n)
-pexp e = pfexp e
-
-pvbind :: Vbind -> Doc
-pvbind (x,t) = parens(pname x <> text "::" <> pty t)
-
-palt :: Alt -> Doc
-palt (Acon c tbs vbs e) =
- sep [pqname c,
- sep (map pattbind tbs),
- sep (map pvbind vbs) <+> text "->"]
- $$ indent (pexp e)
-palt (Alit l e) =
- (plit l <+> text "->")
- $$ indent (pexp e)
-palt (Adefault e) =
- (text "%_ ->")
- $$ indent (pexp e)
-
-plit :: Lit -> Doc
-plit (Lint i t) = parens (integer i <> text "::" <> pty t)
--- we use (text (show (numerator r))) (and the same for denominator)
--- because "(rational r)" was printing out things like "2.0e-2" (which
--- isn't External Core), and (text (show r)) was printing out things
--- like "((-1)/5)" which isn't either (it should be "(-1/5)").
-plit (Lrational r t) = parens (text (show (numerator r)) <+> char '%'
- <+> text (show (denominator r)) <> text "::" <> pty t)
-plit (Lchar c t) = parens (text ("\'" ++ escape [c] ++ "\'") <> text "::" <> pty t)
--- This is a little messy. We shouldn't really be going via String.
-plit (Lstring bs t) = parens (pstring str <> text "::" <> pty t)
- where str = map (chr . fromIntegral) bs
-
-pstring :: String -> Doc
-pstring s = doubleQuotes(text (escape s))
-
-escape :: String -> String
-escape s = foldr f [] (map ord s)
- where
- f cv rest
- | cv > 0xFF = '\\':'x':hs ++ rest
- | (cv < 0x20 || cv > 0x7e || cv == 0x22 || cv == 0x27 || cv == 0x5c) =
- '\\':'x':h1:h0:rest
- where (q1,r1) = quotRem cv 16
- h1 = intToDigit q1
- h0 = intToDigit r1
- hs = dropWhile (=='0') $ reverse $ mkHex cv
- mkHex 0 = ""
- mkHex cv = intToDigit r : mkHex q
- where (q,r) = quotRem cv 16
- f cv rest = (chr cv):rest
-
-\end{code}
-
-
-
-
diff --git a/compiler/coreSyn/TrieMap.lhs b/compiler/coreSyn/TrieMap.lhs
index ac04adab1bc3..2744c5d0b853 100644
--- a/compiler/coreSyn/TrieMap.lhs
+++ b/compiler/coreSyn/TrieMap.lhs
@@ -4,14 +4,14 @@
%
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
-- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
-- for details
-{-# LANGUAGE TypeFamilies #-}
+{-# LANGUAGE RankNTypes, TypeFamilies #-}
module TrieMap(
CoreMap, emptyCoreMap, extendCoreMap, lookupCoreMap, foldCoreMap,
TypeMap, emptyTypeMap, extendTypeMap, lookupTypeMap, foldTypeMap,
diff --git a/compiler/deSugar/Check.lhs b/compiler/deSugar/Check.lhs
index 960475cedd03..e07a70fc6544 100644
--- a/compiler/deSugar/Check.lhs
+++ b/compiler/deSugar/Check.lhs
@@ -5,6 +5,8 @@
% Author: Juan J. Quintela
\begin{code}
+{-# LANGUAGE CPP #-}
+
module Check ( check , ExhaustivePat ) where
#include "HsVersions.h"
@@ -21,7 +23,6 @@ import Name
import TysWiredIn
import PrelNames
import TyCon
-import Type
import SrcLoc
import UniqSet
import Util
@@ -123,7 +124,7 @@ untidy_message :: (Name, [HsLit]) -> (Name, [HsLit])
untidy_message (string, lits) = (string, map untidy_lit lits)
\end{code}
-The function @untidy@ does the reverse work of the @tidy_pat@ funcion.
+The function @untidy@ does the reverse work of the @tidy_pat@ function.
\begin{code}
@@ -144,7 +145,7 @@ untidy b (L loc p) = L loc (untidy' b p)
untidy' _ p@(ConPatIn _ (PrefixCon [])) = p
untidy' b (ConPatIn name ps) = pars b (L loc (ConPatIn name (untidy_con ps)))
untidy' _ (ListPat pats ty Nothing) = ListPat (map untidy_no_pars pats) ty Nothing
- untidy' _ (TuplePat pats box ty) = TuplePat (map untidy_no_pars pats) box ty
+ untidy' _ (TuplePat pats box tys) = TuplePat (map untidy_no_pars pats) box tys
untidy' _ (ListPat _ _ (Just _)) = panic "Check.untidy: Overloaded ListPat"
untidy' _ (PArrPat _ _) = panic "Check.untidy: Shouldn't get a parallel array here!"
untidy' _ (SigPatIn _ _) = panic "Check.untidy: SigPat"
@@ -421,8 +422,8 @@ compare_cons _ _ = panic "Check.compare_cons: Not ConPatOut with RealDataCon"
remove_dups :: [Pat Id] -> [Pat Id]
remove_dups [] = []
-remove_dups (x:xs) | or (map (\y -> compare_cons x y) xs) = remove_dups xs
- | otherwise = x : remove_dups xs
+remove_dups (x:xs) | any (\y -> compare_cons x y) xs = remove_dups xs
+ | otherwise = x : remove_dups xs
get_used_cons :: [(EqnNo, EquationInfo)] -> [Pat Id]
get_used_cons qs = remove_dups [pat | q <- qs, let pat = firstPatN q,
@@ -468,8 +469,8 @@ get_unused_cons used_cons = ASSERT( not (null used_cons) ) unused_cons
where
used_set :: UniqSet DataCon
used_set = mkUniqSet [d | ConPatOut{ pat_con = L _ (RealDataCon d) } <- used_cons]
- (ConPatOut { pat_ty = ty }) = head used_cons
- Just (ty_con, inst_tys) = splitTyConApp_maybe ty
+ (ConPatOut { pat_con = L _ (RealDataCon con1), pat_arg_tys = inst_tys }) = head used_cons
+ ty_con = dataConTyCon con1
unused_cons = filterOut is_used (tyConDataCons ty_con)
is_used con = con `elementOfUniqSet` used_set
|| dataConCannotMatch inst_tys con
@@ -593,9 +594,9 @@ make_con (ConPatOut{ pat_con = L _ (RealDataCon id) }) (lp:lq:ps, constraints)
| isInfixCon id = (nlInfixConPat (getName id) lp lq : ps, constraints)
where q = unLoc lq
-make_con (ConPatOut{ pat_con = L _ (RealDataCon id), pat_args = PrefixCon pats, pat_ty = ty }) (ps, constraints)
- | isTupleTyCon tc = (noLoc (TuplePat pats_con (tupleTyConBoxity tc) ty) : rest_pats, constraints)
- | isPArrFakeCon id = (noLoc (PArrPat pats_con placeHolderType) : rest_pats, constraints)
+make_con (ConPatOut{ pat_con = L _ (RealDataCon id), pat_args = PrefixCon pats, pat_arg_tys = tys }) (ps, constraints)
+ | isTupleTyCon tc = (noLoc (TuplePat pats_con (tupleTyConBoxity tc) tys) : rest_pats, constraints)
+ | isPArrFakeCon id = (noLoc (PArrPat pats_con placeHolderType) : rest_pats, constraints)
| otherwise = (nlConPat name pats_con : rest_pats, constraints)
where
name = getName id
@@ -696,17 +697,16 @@ tidy_pat (CoPat _ pat _) = tidy_pat pat
tidy_pat (NPlusKPat id _ _ _) = WildPat (idType (unLoc id))
tidy_pat (ViewPat _ _ ty) = WildPat ty
tidy_pat (ListPat _ _ (Just (ty,_))) = WildPat ty
-tidy_pat (ConPatOut { pat_con = L _ PatSynCon{}, pat_ty = ty })
- = WildPat ty
+tidy_pat (ConPatOut { pat_con = L _ (PatSynCon syn), pat_arg_tys = tys })
+ = WildPat (patSynInstResTy syn tys)
tidy_pat pat@(ConPatOut { pat_con = L _ con, pat_args = ps })
= pat { pat_args = tidy_con con ps }
tidy_pat (ListPat ps ty Nothing)
- = unLoc $ foldr (\ x y -> mkPrefixConPat consDataCon [x,y] list_ty)
- (mkNilPat list_ty)
+ = unLoc $ foldr (\ x y -> mkPrefixConPat consDataCon [x,y] [ty])
+ (mkNilPat ty)
(map tidy_lpat ps)
- where list_ty = mkListTy ty
-- introduce fake parallel array constructors to be able to handle parallel
-- arrays with the existing machinery for constructor pattern
@@ -714,11 +714,11 @@ tidy_pat (ListPat ps ty Nothing)
tidy_pat (PArrPat ps ty)
= unLoc $ mkPrefixConPat (parrFakeCon (length ps))
(map tidy_lpat ps)
- (mkPArrTy ty)
+ [ty]
-tidy_pat (TuplePat ps boxity ty)
+tidy_pat (TuplePat ps boxity tys)
= unLoc $ mkPrefixConPat (tupleCon (boxityNormalTupleSort boxity) arity)
- (map tidy_lpat ps) ty
+ (map tidy_lpat ps) tys
where
arity = length ps
@@ -735,8 +735,8 @@ tidy_lit_pat :: HsLit -> Pat Id
-- overlap with each other, or even explicit lists of Chars.
tidy_lit_pat lit
| HsString s <- lit
- = unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon [mkCharLitPat c, pat] stringTy)
- (mkPrefixConPat nilDataCon [] stringTy) (unpackFS s)
+ = unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon [mkCharLitPat c, pat] [charTy])
+ (mkPrefixConPat nilDataCon [] [charTy]) (unpackFS s)
| otherwise
= tidyLitPat lit
diff --git a/compiler/deSugar/Coverage.lhs b/compiler/deSugar/Coverage.lhs
index 0ac7de802299..3af9d55a0048 100644
--- a/compiler/deSugar/Coverage.lhs
+++ b/compiler/deSugar/Coverage.lhs
@@ -3,6 +3,8 @@
% (c) University of Glasgow, 2007
%
\begin{code}
+{-# LANGUAGE NondecreasingIndentation #-}
+
module Coverage (addTicksToBinds, hpcInitCode) where
import Type
@@ -62,11 +64,9 @@ addTicksToBinds
-> LHsBinds Id
-> IO (LHsBinds Id, HpcInfo, ModBreaks)
-addTicksToBinds dflags mod mod_loc exports tyCons binds =
-
- case ml_hs_file mod_loc of
- Nothing -> return (binds, emptyHpcInfo False, emptyModBreaks)
- Just orig_file -> do
+addTicksToBinds dflags mod mod_loc exports tyCons binds
+ | let passes = coveragePasses dflags, not (null passes),
+ Just orig_file <- ml_hs_file mod_loc = do
if "boot" `isSuffixOf` orig_file
then return (binds, emptyHpcInfo False, emptyModBreaks)
@@ -74,9 +74,8 @@ addTicksToBinds dflags mod mod_loc exports tyCons binds =
let orig_file2 = guessSourceFile binds orig_file
- (binds1,_,st)
- = unTM (addTickLHsBinds binds)
- (TTE
+ tickPass tickish (binds,st) =
+ let env = TTE
{ fileName = mkFastString orig_file2
, declPath = []
, tte_dflags = dflags
@@ -86,19 +85,18 @@ addTicksToBinds dflags mod mod_loc exports tyCons binds =
, blackList = Map.fromList
[ (getSrcSpan (tyConName tyCon),())
| tyCon <- tyCons ]
- , density = mkDensity dflags
+ , density = mkDensity tickish dflags
, this_mod = mod
- , tickishType = case hscTarget dflags of
- HscInterpreted -> Breakpoints
- _ | gopt Opt_Hpc dflags -> HpcTicks
- | gopt Opt_SccProfilingOn dflags
- -> ProfNotes
- | otherwise -> error "addTicksToBinds: No way to annotate!"
- })
- (TT
- { tickBoxCount = 0
- , mixEntries = []
- })
+ , tickishType = tickish
+ }
+ (binds',_,st') = unTM (addTickLHsBinds binds) env st
+ in (binds', st')
+ (binds1,st) = foldr tickPass
+ (binds,
+ TT { tickBoxCount = 0
+ , mixEntries = []
+ } )
+ passes
let entries = reverse $ mixEntries st
@@ -112,12 +110,13 @@ addTicksToBinds dflags mod mod_loc exports tyCons binds =
return (binds1, HpcInfo count hashNo, modBreaks)
+ | otherwise = return (binds, emptyHpcInfo False, emptyModBreaks)
guessSourceFile :: LHsBinds Id -> FilePath -> FilePath
guessSourceFile binds orig_file =
-- Try look for a file generated from a .hsc file to a
-- .hs file, by peeking ahead.
- let top_pos = catMaybes $ foldrBag (\ (_, (L pos _)) rest ->
+ let top_pos = catMaybes $ foldrBag (\ (L pos _) rest ->
srcSpanFileName_maybe pos : rest) [] binds
in
case top_pos of
@@ -152,8 +151,8 @@ writeMixEntries dflags mod count entries filename
mod_name = moduleNameString (moduleName mod)
hpc_mod_dir
- | modulePackageId mod == mainPackageId = hpc_dir
- | otherwise = hpc_dir ++ "/" ++ packageIdString (modulePackageId mod)
+ | modulePackageKey mod == mainPackageKey = hpc_dir
+ | otherwise = hpc_dir ++ "/" ++ packageKeyString (modulePackageKey mod)
tabStop = 8 -- counts as a normal char in GHC's location ranges.
@@ -181,20 +180,18 @@ data TickDensity
| TickCallSites -- for stack tracing
deriving Eq
-mkDensity :: DynFlags -> TickDensity
-mkDensity dflags
- | gopt Opt_Hpc dflags = TickForCoverage
- | HscInterpreted <- hscTarget dflags = TickForBreakPoints
- | ProfAutoAll <- profAuto dflags = TickAllFunctions
- | ProfAutoTop <- profAuto dflags = TickTopFunctions
- | ProfAutoExports <- profAuto dflags = TickExportedFunctions
- | ProfAutoCalls <- profAuto dflags = TickCallSites
- | otherwise = panic "desnity"
- -- ToDo: -fhpc is taking priority over -fprof-auto here. It seems
- -- that coverage works perfectly well with profiling, but you don't
- -- get any auto-generated SCCs. It would make perfect sense to
- -- allow both of them, and indeed to combine some of the other flags
- -- (-fprof-auto-calls -fprof-auto-top, for example)
+mkDensity :: TickishType -> DynFlags -> TickDensity
+mkDensity tickish dflags = case tickish of
+ HpcTicks -> TickForCoverage
+ SourceNotes -> TickForCoverage
+ Breakpoints -> TickForBreakPoints
+ ProfNotes ->
+ case profAuto dflags of
+ ProfAutoAll -> TickAllFunctions
+ ProfAutoTop -> TickTopFunctions
+ ProfAutoExports -> TickExportedFunctions
+ ProfAutoCalls -> TickCallSites
+ _other -> panic "mkDensity"
-- | Decide whether to add a tick to a binding or not.
shouldTickBind :: TickDensity
@@ -229,11 +226,7 @@ shouldTickPatBind density top_lev
-- Adding ticks to bindings
addTickLHsBinds :: LHsBinds Id -> TM (LHsBinds Id)
-addTickLHsBinds binds = mapBagM addTick binds
- where
- addTick (origin, bind) = do
- bind' <- addTickLHsBind bind
- return (origin, bind')
+addTickLHsBinds = mapBagM addTickLHsBind
addTickLHsBind :: LHsBind Id -> TM (LHsBind Id)
addTickLHsBind (L pos bind@(AbsBinds { abs_binds = binds,
@@ -560,6 +553,13 @@ addTickHsExpr (ArithSeq ty wit arith_seq) =
where addTickWit Nothing = return Nothing
addTickWit (Just fl) = do fl' <- addTickHsExpr fl
return (Just fl')
+
+-- We might encounter existing ticks (multiple Coverage passes)
+addTickHsExpr (HsTick t e) =
+ liftM (HsTick t) (addTickLHsExpr e)
+addTickHsExpr (HsBinTick t0 t1 e) =
+ liftM (HsBinTick t0 t1) (addTickLHsExpr e)
+
addTickHsExpr (HsTickPragma _ (L pos e0)) = do
e2 <- allocTickBox (ExpBox False) False False pos $
addTickHsExpr e0
@@ -940,8 +940,17 @@ data TickTransEnv = TTE { fileName :: FastString
-- deriving Show
-data TickishType = ProfNotes | HpcTicks | Breakpoints
+data TickishType = ProfNotes | HpcTicks | Breakpoints | SourceNotes
+coveragePasses :: DynFlags -> [TickishType]
+coveragePasses dflags =
+ ifa (hscTarget dflags == HscInterpreted) Breakpoints $
+ ifa (gopt Opt_Hpc dflags) HpcTicks $
+ ifa (gopt Opt_SccProfilingOn dflags &&
+ profAuto dflags /= NoProfAuto) ProfNotes $
+ ifa (gopt Opt_Debug dflags) SourceNotes []
+ where ifa f x xs | f = x:xs
+ | otherwise = xs
-- | Tickishs that only make sense when their source code location
-- refers to the current file. This might not always be true due to
@@ -1114,6 +1123,9 @@ mkTickish boxLabel countEntries topOnly pos fvs decl_path =
HpcTicks -> HpcTick (this_mod env) c
ProfNotes -> ProfNote cc count True{-scopes-}
Breakpoints -> Breakpoint c ids
+ SourceNotes | RealSrcSpan pos' <- pos
+ -> SourceNote pos' cc_name
+ _otherwise -> panic "mkTickish: bad source span!"
in
( tickish
, fvs
@@ -1235,9 +1247,9 @@ hpcInitCode this_mod (HpcInfo tickCount hashNo)
module_name = hcat (map (text.charToC) $
bytesFS (moduleNameFS (Module.moduleName this_mod)))
package_name = hcat (map (text.charToC) $
- bytesFS (packageIdFS (modulePackageId this_mod)))
+ bytesFS (packageKeyFS (modulePackageKey this_mod)))
full_name_str
- | modulePackageId this_mod == mainPackageId
+ | modulePackageKey this_mod == mainPackageKey
= module_name
| otherwise
= package_name <> char '/' <> module_name
diff --git a/compiler/deSugar/Desugar.lhs b/compiler/deSugar/Desugar.lhs
index e13767ff59eb..908114202646 100644
--- a/compiler/deSugar/Desugar.lhs
+++ b/compiler/deSugar/Desugar.lhs
@@ -6,6 +6,8 @@
The Desugarer: turning HsSyn into Core.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module Desugar ( deSugar, deSugarExpr ) where
import DynFlags
@@ -18,6 +20,7 @@ import Id
import Name
import Type
import FamInstEnv
+import Coercion
import InstEnv
import Class
import Avail
@@ -33,8 +36,11 @@ import Module
import NameSet
import NameEnv
import Rules
+import TysPrim (eqReprPrimTyCon)
+import TysWiredIn (coercibleTyCon )
import BasicTypes ( Activation(.. ) )
import CoreMonad ( endPass, CoreToDo(..) )
+import MkCore
import FastString
import ErrUtils
import Outputable
@@ -46,8 +52,6 @@ import OrdList
import Data.List
import Data.IORef
import Control.Monad( when )
-import Data.Maybe ( mapMaybe )
-import UniqFM
\end{code}
%************************************************************************
@@ -96,15 +100,9 @@ deSugar hsc_env
; let export_set = availsToNameSet exports
target = hscTarget dflags
hpcInfo = emptyHpcInfo other_hpc_info
- want_ticks = gopt Opt_Hpc dflags
- || target == HscInterpreted
- || (gopt Opt_SccProfilingOn dflags
- && case profAuto dflags of
- NoProfAuto -> False
- _ -> True)
; (binds_cvr, ds_hpc_info, modBreaks)
- <- if want_ticks && not (isHsBoot hsc_src)
+ <- if not (isHsBoot hsc_src)
then addTicksToBinds dflags mod mod_loc export_set
(typeEnvTyCons type_env) binds
else return (binds, hpcInfo, emptyModBreaks)
@@ -119,27 +117,20 @@ deSugar hsc_env
; let hpc_init
| gopt Opt_Hpc dflags = hpcInitCode mod ds_hpc_info
| otherwise = empty
- ; let patsyn_defs = [(patSynId ps, ps) | ps <- patsyns]
; return ( ds_ev_binds
, foreign_prs `appOL` core_prs `appOL` spec_prs
, spec_rules ++ ds_rules, ds_vects
- , ds_fords `appendStubC` hpc_init
- , patsyn_defs) }
+ , ds_fords `appendStubC` hpc_init) }
; case mb_res of {
Nothing -> return (msgs, Nothing) ;
- Just (ds_ev_binds, all_prs, all_rules, vects0, ds_fords, patsyn_defs) -> do
+ Just (ds_ev_binds, all_prs, all_rules, vects0, ds_fords) -> do
do { -- Add export flags to bindings
keep_alive <- readIORef keep_var
- ; let (rules_for_locals, rules_for_imps)
- = partition isLocalRule all_rules
- final_patsyns = addExportFlagsAndRules target export_set keep_alive [] patsyn_defs
- exp_patsyn_wrappers = mapMaybe (patSynWrapper . snd) final_patsyns
- exp_patsyn_matchers = map (patSynMatcher . snd) final_patsyns
- keep_alive' = addListToUFM keep_alive (map (\x -> (x, getName x)) (exp_patsyn_wrappers ++ exp_patsyn_matchers))
- final_prs = addExportFlagsAndRules target
- export_set keep_alive' rules_for_locals (fromOL all_prs)
+ ; let (rules_for_locals, rules_for_imps) = partition isLocalRule all_rules
+ final_prs = addExportFlagsAndRules target export_set keep_alive
+ rules_for_locals (fromOL all_prs)
final_pgm = combineEvBinds ds_ev_binds final_prs
-- Notice that we put the whole lot in a big Rec, even the foreign binds
@@ -183,7 +174,7 @@ deSugar hsc_env
mg_fam_insts = fam_insts,
mg_inst_env = inst_env,
mg_fam_inst_env = fam_inst_env,
- mg_patsyns = map snd . filter (isExportedId . fst) $ final_patsyns,
+ mg_patsyns = filter ((`elemNameSet` export_set) . patSynName) patsyns,
mg_rules = ds_rules_for_imps,
mg_binds = ds_binds,
mg_foreign = ds_fords,
@@ -347,6 +338,7 @@ Reason
%************************************************************************
\begin{code}
+
dsRule :: LRuleDecl Id -> DsM (Maybe CoreRule)
dsRule (L loc (HsRule name act vars lhs _tv_lhs rhs _fv_rhs))
= putSrcSpanDs loc $
@@ -359,9 +351,11 @@ dsRule (L loc (HsRule name act vars lhs _tv_lhs rhs _fv_rhs))
; rhs' <- dsLExpr rhs
; dflags <- getDynFlags
+ ; (bndrs'', lhs'', rhs'') <- unfold_coerce bndrs' lhs' rhs'
+
-- Substitute the dict bindings eagerly,
-- and take the body apart into a (f args) form
- ; case decomposeRuleLhs bndrs' lhs' of {
+ ; case decomposeRuleLhs bndrs'' lhs'' of {
Left msg -> do { warnDs msg; return Nothing } ;
Right (final_bndrs, fn_id, args) -> do
@@ -370,7 +364,7 @@ dsRule (L loc (HsRule name act vars lhs _tv_lhs rhs _fv_rhs))
-- we don't want to attach rules to the bindings of implicit Ids,
-- because they don't show up in the bindings until just before code gen
fn_name = idName fn_id
- final_rhs = simpleOptExpr rhs' -- De-crap it
+ final_rhs = simpleOptExpr rhs'' -- De-crap it
rule = mkRule False {- Not auto -} is_local
name act fn_name final_bndrs args final_rhs
@@ -398,6 +392,27 @@ dsRule (L loc (HsRule name act vars lhs _tv_lhs rhs _fv_rhs))
; return (Just rule)
} } }
+
+-- See Note [Desugaring coerce as cast]
+unfold_coerce :: [Id] -> CoreExpr -> CoreExpr -> DsM ([Var], CoreExpr, CoreExpr)
+unfold_coerce bndrs lhs rhs = do
+ (bndrs', wrap) <- go bndrs
+ return (bndrs', wrap lhs, wrap rhs)
+ where
+ go :: [Id] -> DsM ([Id], CoreExpr -> CoreExpr)
+ go [] = return ([], id)
+ go (v:vs)
+ | Just (tc, args) <- splitTyConApp_maybe (idType v)
+ , tc == coercibleTyCon = do
+ let ty' = mkTyConApp eqReprPrimTyCon args
+ v' <- mkDerivedLocalM mkRepEqOcc v ty'
+
+ (bndrs, wrap) <- go vs
+ return (v':bndrs, mkCoreLet (NonRec v (mkEqBox (mkCoVarCo v'))) . wrap)
+ | otherwise = do
+ (bndrs,wrap) <- go vs
+ return (v:bndrs, wrap)
+
\end{code}
Note [Desugaring RULE left hand sides]
@@ -417,6 +432,20 @@ the rule is precisly to optimise them:
{-# RULES "fromRational/id" fromRational = id :: Rational -> Rational #-}
+Note [Desugaring coerce as cast]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We want the user to express a rule saying roughly “mapping a coercion over a
+list can be replaced by a coercionâ€. But the cast operator of Core (â–·) cannot
+be written in Haskell. So we use `coerce` for that (#2110). The user writes
+ map coerce = coerce
+as a RULE, and this optimizes any kind of mapped' casts aways, including `map
+MkNewtype`.
+
+For that we replace any forall'ed `c :: Coercible a b` value in a RULE by
+corresponding `co :: a ~#R b` and wrap the LHS and the RHS in
+`let c = MkCoercible co in ...`. This is later simplified to the desired form
+by simpleOptExpr (for the LHS) resp. the simplifiers (for the RHS).
+
%************************************************************************
%* *
%* Desugaring vectorisation declarations
diff --git a/compiler/deSugar/DsArrows.lhs b/compiler/deSugar/DsArrows.lhs
index 763106f2b3f4..35a2477fd531 100644
--- a/compiler/deSugar/DsArrows.lhs
+++ b/compiler/deSugar/DsArrows.lhs
@@ -6,7 +6,8 @@
Desugaring arrow commands
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -465,8 +466,8 @@ dsCmd ids local_vars stack_ty res_ty (HsCmdIf mb_fun cond then_cmd else_cmd)
left_con <- dsLookupDataCon leftDataConName
right_con <- dsLookupDataCon rightDataConName
- let mk_left_expr ty1 ty2 e = mkConApp left_con [Type ty1, Type ty2, e]
- mk_right_expr ty1 ty2 e = mkConApp right_con [Type ty1, Type ty2, e]
+ let mk_left_expr ty1 ty2 e = mkCoreConApps left_con [Type ty1, Type ty2, e]
+ mk_right_expr ty1 ty2 e = mkCoreConApps right_con [Type ty1, Type ty2, e]
in_ty = envStackType env_ids stack_ty
then_ty = envStackType then_ids stack_ty
@@ -517,7 +518,7 @@ case bodies, containing the following fields:
\begin{code}
dsCmd ids local_vars stack_ty res_ty
- (HsCmdCase exp (MG { mg_alts = matches, mg_arg_tys = arg_tys }))
+ (HsCmdCase exp (MG { mg_alts = matches, mg_arg_tys = arg_tys, mg_origin = origin }))
env_ids = do
stack_id <- newSysLocalDs stack_ty
@@ -561,7 +562,7 @@ dsCmd ids local_vars stack_ty res_ty
in_ty = envStackType env_ids stack_ty
core_body <- dsExpr (HsCase exp (MG { mg_alts = matches', mg_arg_tys = arg_tys
- , mg_res_ty = sum_ty }))
+ , mg_res_ty = sum_ty, mg_origin = origin }))
-- Note that we replace the HsCase result type by sum_ty,
-- which is the type of matches'
diff --git a/compiler/deSugar/DsBinds.lhs b/compiler/deSugar/DsBinds.lhs
index cd683ba36592..172d19b9ac5b 100644
--- a/compiler/deSugar/DsBinds.lhs
+++ b/compiler/deSugar/DsBinds.lhs
@@ -10,7 +10,8 @@ in that the @Rec@/@NonRec@/etc structure is thrown away (whereas at
lower levels it is preserved with @let@/@letrec@s).
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -34,6 +35,7 @@ import HsSyn -- lots of things
import CoreSyn -- lots of things
import Literal ( Literal(MachStr) )
import CoreSubst
+import OccurAnal ( occurAnalyseExpr )
import MkCore
import CoreUtils
import CoreArity ( etaExpand )
@@ -95,13 +97,8 @@ ds_lhs_binds :: LHsBinds Id -> DsM (OrdList (Id,CoreExpr))
ds_lhs_binds binds = do { ds_bs <- mapBagM dsLHsBind binds
; return (foldBag appOL id nilOL ds_bs) }
-dsLHsBind :: (Origin, LHsBind Id) -> DsM (OrdList (Id,CoreExpr))
-dsLHsBind (origin, L loc bind)
- = handleWarnings $ putSrcSpanDs loc $ dsHsBind bind
- where
- handleWarnings = if isGenerated origin
- then discardWarningsDs
- else id
+dsLHsBind :: LHsBind Id -> DsM (OrdList (Id,CoreExpr))
+dsLHsBind (L loc bind) = putSrcSpanDs loc $ dsHsBind bind
dsHsBind :: HsBind Id -> DsM (OrdList (Id,CoreExpr))
@@ -458,7 +455,10 @@ dsSpec mb_poly_rhs (L loc (SpecPrag poly_id spec_co spec_inl))
; (bndrs, ds_lhs) <- liftM collectBinders
(dsHsWrapper spec_co (Var poly_id))
; let spec_ty = mkPiTypes bndrs (exprType ds_lhs)
- ; case decomposeRuleLhs bndrs ds_lhs of {
+ ; -- pprTrace "dsRule" (vcat [ ptext (sLit "Id:") <+> ppr poly_id
+ -- , ptext (sLit "spec_co:") <+> ppr spec_co
+ -- , ptext (sLit "ds_rhs:") <+> ppr ds_lhs ]) $
+ case decomposeRuleLhs bndrs ds_lhs of {
Left msg -> do { warnDs msg; return Nothing } ;
Right (rule_bndrs, _fn, args) -> do
@@ -582,73 +582,173 @@ SPEC f :: ty [n] INLINE [k]
decomposeRuleLhs :: [Var] -> CoreExpr -> Either SDoc ([Var], Id, [CoreExpr])
-- (decomposeRuleLhs bndrs lhs) takes apart the LHS of a RULE,
-- The 'bndrs' are the quantified binders of the rules, but decomposeRuleLhs
--- may add some extra dictionary binders (see Note [Constant rule dicts])
+-- may add some extra dictionary binders (see Note [Free dictionaries])
--
-- Returns Nothing if the LHS isn't of the expected shape
-decomposeRuleLhs bndrs lhs
- = -- Note [Simplifying the left-hand side of a RULE]
- case collectArgs opt_lhs of
- (Var fn, args) -> check_bndrs fn args
-
- (Case scrut bndr ty [(DEFAULT, _, body)], args)
- | isDeadBinder bndr -- Note [Matching seqId]
- -> check_bndrs seqId (args' ++ args)
- where
- args' = [Type (idType bndr), Type ty, scrut, body]
-
- _other -> Left bad_shape_msg
+-- Note [Decomposing the left-hand side of a RULE]
+decomposeRuleLhs orig_bndrs orig_lhs
+ | not (null unbound) -- Check for things unbound on LHS
+ -- See Note [Unused spec binders]
+ = Left (vcat (map dead_msg unbound))
+
+ | Var fn_var <- fun
+ , not (fn_var `elemVarSet` orig_bndr_set)
+ = -- pprTrace "decmposeRuleLhs" (vcat [ ptext (sLit "orig_bndrs:") <+> ppr orig_bndrs
+ -- , ptext (sLit "orig_lhs:") <+> ppr orig_lhs
+ -- , ptext (sLit "lhs1:") <+> ppr lhs1
+ -- , ptext (sLit "bndrs1:") <+> ppr bndrs1
+ -- , ptext (sLit "fn_var:") <+> ppr fn_var
+ -- , ptext (sLit "args:") <+> ppr args]) $
+ Right (bndrs1, fn_var, args)
+
+ | Case scrut bndr ty [(DEFAULT, _, body)] <- fun
+ , isDeadBinder bndr -- Note [Matching seqId]
+ , let args' = [Type (idType bndr), Type ty, scrut, body]
+ = Right (bndrs1, seqId, args' ++ args)
+
+ | otherwise
+ = Left bad_shape_msg
where
- opt_lhs = simpleOptExpr lhs
-
- check_bndrs fn args
- | null dead_bndrs = Right (extra_dict_bndrs ++ bndrs, fn, args)
- | otherwise = Left (vcat (map dead_msg dead_bndrs))
- where
- arg_fvs = exprsFreeVars args
-
- -- Check for dead binders: Note [Unused spec binders]
- dead_bndrs = filterOut (`elemVarSet` arg_fvs) bndrs
+ lhs1 = drop_dicts orig_lhs
+ lhs2 = simpleOptExpr lhs1 -- See Note [Simplify rule LHS]
+ (fun,args) = collectArgs lhs2
+ lhs_fvs = exprFreeVars lhs2
+ unbound = filterOut (`elemVarSet` lhs_fvs) orig_bndrs
+ bndrs1 = orig_bndrs ++ extra_dict_bndrs
- -- Add extra dict binders: Note [Constant rule dicts]
- extra_dict_bndrs = [ mkLocalId (localiseName (idName d)) (idType d)
- | d <- varSetElems (arg_fvs `delVarSetList` bndrs)
- , isDictId d]
+ orig_bndr_set = mkVarSet orig_bndrs
+ -- Add extra dict binders: Note [Free dictionaries]
+ extra_dict_bndrs = [ mkLocalId (localiseName (idName d)) (idType d)
+ | d <- varSetElems (lhs_fvs `delVarSetList` orig_bndrs)
+ , isDictId d ]
bad_shape_msg = hang (ptext (sLit "RULE left-hand side too complicated to desugar"))
- 2 (ppr opt_lhs)
+ 2 (vcat [ text "Optimised lhs:" <+> ppr lhs2
+ , text "Orig lhs:" <+> ppr orig_lhs])
dead_msg bndr = hang (sep [ ptext (sLit "Forall'd") <+> pp_bndr bndr
, ptext (sLit "is not bound in RULE lhs")])
- 2 (ppr opt_lhs)
+ 2 (vcat [ text "Orig bndrs:" <+> ppr orig_bndrs
+ , text "Orig lhs:" <+> ppr orig_lhs
+ , text "optimised lhs:" <+> ppr lhs2 ])
pp_bndr bndr
| isTyVar bndr = ptext (sLit "type variable") <+> quotes (ppr bndr)
| Just pred <- evVarPred_maybe bndr = ptext (sLit "constraint") <+> quotes (ppr pred)
| otherwise = ptext (sLit "variable") <+> quotes (ppr bndr)
+
+ drop_dicts :: CoreExpr -> CoreExpr
+ drop_dicts e
+ = wrap_lets needed bnds body
+ where
+ needed = orig_bndr_set `minusVarSet` exprFreeVars body
+ (bnds, body) = split_lets (occurAnalyseExpr e)
+ -- The occurAnalyseExpr drops dead bindings which is
+ -- crucial to ensure that every binding is used later;
+ -- which in turn makes wrap_lets work right
+
+ split_lets :: CoreExpr -> ([(DictId,CoreExpr)], CoreExpr)
+ split_lets e
+ | Let (NonRec d r) body <- e
+ , isDictId d
+ , (bs, body') <- split_lets body
+ = ((d,r):bs, body')
+ | otherwise
+ = ([], e)
+
+ wrap_lets :: VarSet -> [(DictId,CoreExpr)] -> CoreExpr -> CoreExpr
+ wrap_lets _ [] body = body
+ wrap_lets needed ((d, r) : bs) body
+ | rhs_fvs `intersectsVarSet` needed = Let (NonRec d r) (wrap_lets needed' bs body)
+ | otherwise = wrap_lets needed bs body
+ where
+ rhs_fvs = exprFreeVars r
+ needed' = (needed `minusVarSet` rhs_fvs) `extendVarSet` d
\end{code}
-Note [Simplifying the left-hand side of a RULE]
+Note [Decomposing the left-hand side of a RULE]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-simpleOptExpr occurrence-analyses and simplifies the lhs
-and thereby
-(a) sorts dict bindings into NonRecs and inlines them
-(b) substitute trivial lets so that they don't get in the way
- Note that we substitute the function too; we might
- have this as a LHS: let f71 = M.f Int in f71
-(c) does eta reduction
-
-For (c) consider the fold/build rule, which without simplification
-looked like:
- fold k z (build (/\a. g a)) ==> ...
-This doesn't match unless you do eta reduction on the build argument.
-Similarly for a LHS like
- augment g (build h)
-we do not want to get
- augment (\a. g a) (build h)
-otherwise we don't match when given an argument like
- augment (\a. h a a) (build h)
-
-NB: tcSimplifyRuleLhs is very careful not to generate complicated
- dictionary expressions that we might have to match
+There are several things going on here.
+* drop_dicts: see Note [Drop dictionary bindings on rule LHS]
+* simpleOptExpr: see Note [Simplify rule LHS]
+* extra_dict_bndrs: see Note [Free dictionaries]
+
+Note [Drop dictionary bindings on rule LHS]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+drop_dicts drops dictionary bindings on the LHS where possible.
+ E.g. let d:Eq [Int] = $fEqList $fEqInt in f d
+ --> f d
+ Reasoning here is that there is only one d:Eq [Int], and so we can
+ quantify over it. That makes 'd' free in the LHS, but that is later
+ picked up by extra_dict_bndrs (Note [Free dictionaries]).
+
+ NB 1: We can only drop the binding if the RHS doesn't bind
+ one of the orig_bndrs, which we assume occur on RHS.
+ Example
+ f :: (Eq a) => b -> a -> a
+ {-# SPECIALISE f :: Eq a => b -> [a] -> [a] #-}
+ Here we want to end up with
+ RULE forall d:Eq a. f ($dfEqList d) = f_spec d
+ Of course, the ($dfEqList d) in the pattern makes it less likely
+ to match, but there is no other way to get d:Eq a
+
+ NB 2: We do drop_dicts *before* simpleOptExpr, so that we expect all
+ the evidence bindings to be wrapped around the outside of the
+ LHS. (After simpleOptExpr they'll usually have been inlined.)
+ dsHsWrapper does dependency analysis, so that civilised ones
+ will be simple NonRec bindings. We don't handle recursive
+ dictionaries!
+
+ NB 3: In the common case of a non-overloaded, but perhaps-polymorphic
+ specialisation, we don't need to bind *any* dictionaries for use
+ in the RHS. For example (Trac #8331)
+ {-# SPECIALIZE INLINE useAbstractMonad :: ReaderST s Int #-}
+ useAbstractMonad :: MonadAbstractIOST m => m Int
+ Here, deriving (MonadAbstractIOST (ReaderST s)) is a lot of code
+ but the RHS uses no dictionaries, so we want to end up with
+ RULE forall s (d :: MonadAbstractIOST (ReaderST s)).
+ useAbstractMonad (ReaderST s) d = $suseAbstractMonad s
+
+ Trac #8848 is a good example of where there are some interesting
+ dictionary bindings to discard.
+
+The drop_dicts algorithm is based on these observations:
+
+ * Given (let d = rhs in e) where d is a DictId,
+ matching 'e' will bind e's free variables.
+
+ * So we want to keep the binding if one of the needed variables (for
+ which we need a binding) is in fv(rhs) but not already in fv(e).
+
+ * The "needed variables" are simply the orig_bndrs. Consider
+ f :: (Eq a, Show b) => a -> b -> String
+ {-# SPECIALISE f :: (Show b) => Int -> b -> String #-}
+ Then orig_bndrs includes the *quantified* dictionaries of the type
+ namely (dsb::Show b), but not the one for Eq Int
+
+So we work inside out, applying the above criterion at each step.
+
+
+Note [Simplify rule LHS]
+~~~~~~~~~~~~~~~~~~~~~~~~
+simpleOptExpr occurrence-analyses and simplifies the LHS:
+
+ (a) Inline any remaining dictionary bindings (which hopefully
+ occur just once)
+
+ (b) Substitute trivial lets so that they don't get in the way
+ Note that we substitute the function too; we might
+ have this as a LHS: let f71 = M.f Int in f71
+
+ (c) Do eta reduction. To see why, consider the fold/build rule,
+ which without simplification looked like:
+ fold k z (build (/\a. g a)) ==> ...
+ This doesn't match unless you do eta reduction on the build argument.
+ Similarly for a LHS like
+ augment g (build h)
+ we do not want to get
+ augment (\a. g a) (build h)
+ otherwise we don't match when given an argument like
+ augment (\a. h a a) (build h)
Note [Matching seqId]
~~~~~~~~~~~~~~~~~~~
@@ -671,8 +771,8 @@ the constraint is unused. We could bind 'd' to (error "unused")
but it seems better to reject the program because it's almost certainly
a mistake. That's what the isDeadBinder call detects.
-Note [Constant rule dicts]
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+Note [Free dictionaries]
+~~~~~~~~~~~~~~~~~~~~~~~~
When the LHS of a specialisation rule, (/\as\ds. f es) has a free dict,
which is presumably in scope at the function definition site, we can quantify
over it too. *Any* dict with that type will do.
diff --git a/compiler/deSugar/DsCCall.lhs b/compiler/deSugar/DsCCall.lhs
index f3f0adc66871..a47b9ea4ddb9 100644
--- a/compiler/deSugar/DsCCall.lhs
+++ b/compiler/deSugar/DsCCall.lhs
@@ -6,7 +6,8 @@
Desugaring foreign calls
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -32,7 +33,6 @@ import CoreUtils
import MkCore
import Var
import MkId
-import Maybes
import ForeignCall
import DataCon
@@ -50,6 +50,8 @@ import VarSet
import DynFlags
import Outputable
import Util
+
+import Data.Maybe
\end{code}
Desugaring of @ccall@s consists of adding some state manipulation,
@@ -177,7 +179,7 @@ unboxArg arg
-- data MutableByteArray s ix = MutableByteArray ix ix (MutableByteArray# s)
| is_product_type &&
data_con_arity == 3 &&
- maybeToBool maybe_arg3_tycon &&
+ isJust maybe_arg3_tycon &&
(arg3_tycon == byteArrayPrimTyCon ||
arg3_tycon == mutableByteArrayPrimTyCon)
= do case_bndr <- newSysLocalDs arg_ty
@@ -192,7 +194,7 @@ unboxArg arg
where
arg_ty = exprType arg
maybe_product_type = splitDataProductType_maybe arg_ty
- is_product_type = maybeToBool maybe_product_type
+ is_product_type = isJust maybe_product_type
Just (_, _, data_con, data_con_arg_tys) = maybe_product_type
data_con_arity = dataConSourceArity data_con
(data_con_arg_ty1 : _) = data_con_arg_tys
@@ -236,9 +238,9 @@ boxResult result_ty
_ -> []
return_result state anss
- = mkConApp (tupleCon UnboxedTuple (2 + length extra_result_tys))
- (map Type (realWorldStatePrimTy : io_res_ty : extra_result_tys)
- ++ (state : anss))
+ = mkCoreConApps (tupleCon UnboxedTuple (2 + length extra_result_tys))
+ (map Type (realWorldStatePrimTy : io_res_ty : extra_result_tys)
+ ++ (state : anss))
; (ccall_res_ty, the_alt) <- mk_alt return_result res
diff --git a/compiler/deSugar/DsExpr.lhs b/compiler/deSugar/DsExpr.lhs
index 546a198ca859..2a2d73399562 100644
--- a/compiler/deSugar/DsExpr.lhs
+++ b/compiler/deSugar/DsExpr.lhs
@@ -6,6 +6,8 @@
Desugaring exporessions.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module DsExpr ( dsExpr, dsLExpr, dsLocalBinds, dsValBinds, dsLit ) where
#include "HsVersions.h"
@@ -99,7 +101,7 @@ ds_val_bind :: (RecFlag, LHsBinds Id) -> CoreExpr -> DsM CoreExpr
-- a tuple and doing selections.
-- Silently ignore INLINE and SPECIALISE pragmas...
ds_val_bind (NonRecursive, hsbinds) body
- | [(_, L loc bind)] <- bagToList hsbinds,
+ | [L loc bind] <- bagToList hsbinds,
-- Non-recursive, non-overloaded bindings only come in ones
-- ToDo: in some bizarre case it's conceivable that there
-- could be dict binds in the 'binds'. (See the notes
@@ -130,11 +132,11 @@ dsStrictBind :: HsBind Id -> CoreExpr -> DsM CoreExpr
dsStrictBind (AbsBinds { abs_tvs = [], abs_ev_vars = []
, abs_exports = exports
, abs_ev_binds = ev_binds
- , abs_binds = binds }) body
+ , abs_binds = lbinds }) body
= do { let body1 = foldr bind_export body exports
bind_export export b = bindNonRec (abe_poly export) (Var (abe_mono export)) b
- ; body2 <- foldlBagM (\body (_, bind) -> dsStrictBind (unLoc bind) body)
- body1 binds
+ ; body2 <- foldlBagM (\body lbind -> dsStrictBind (unLoc lbind) body)
+ body1 lbinds
; ds_binds <- dsTcEvBinds ev_binds
; return (mkCoreLets ds_binds body2) }
@@ -163,11 +165,11 @@ dsStrictBind bind body = pprPanic "dsLet: unlifted" (ppr bind $$ ppr body)
----------------------
strictMatchOnly :: HsBind Id -> Bool
-strictMatchOnly (AbsBinds { abs_binds = binds })
- = anyBag (strictMatchOnly . unLoc . snd) binds
-strictMatchOnly (PatBind { pat_lhs = lpat, pat_rhs_ty = ty })
- = isUnLiftedType ty
- || isBangLPat lpat
+strictMatchOnly (AbsBinds { abs_binds = lbinds })
+ = anyBag (strictMatchOnly . unLoc) lbinds
+strictMatchOnly (PatBind { pat_lhs = lpat, pat_rhs_ty = rhs_ty })
+ = isUnLiftedType rhs_ty
+ || isStrictLPat lpat
|| any (isUnLiftedType . idType) (collectPatBinders lpat)
strictMatchOnly (FunBind { fun_id = L _ id })
= isUnLiftedType (idType id)
@@ -290,9 +292,9 @@ dsExpr (ExplicitTuple tup_args boxity)
; (lam_vars, args) <- foldM go ([], []) (reverse tup_args)
-- The reverse is because foldM goes left-to-right
- ; return $ mkCoreLams lam_vars $
- mkConApp (tupleCon (boxityNormalTupleSort boxity) (length tup_args))
- (map (Type . exprType) args ++ args) }
+ ; return $ mkCoreLams lam_vars $
+ mkCoreConApps (tupleCon (boxityNormalTupleSort boxity) (length tup_args))
+ (map (Type . exprType) args ++ args) }
dsExpr (HsSCC cc expr@(L loc _)) = do
mod_name <- getModule
@@ -433,7 +435,7 @@ dsExpr (RecordCon (L _ data_con_id) con_expr rbinds) = do
then mapM unlabelled_bottom arg_tys
else mapM mk_arg (zipEqual "dsExpr:RecordCon" arg_tys labels)
- return (mkApps con_expr' con_args)
+ return (mkCoreApps con_expr' con_args)
\end{code}
Record update is a little harder. Suppose we have the decl:
@@ -488,7 +490,7 @@ dsExpr expr@(RecordUpd record_expr (HsRecFields { rec_flds = fields })
-- constructor aguments.
; alts <- mapM (mk_alt upd_fld_env) cons_to_upd
; ([discrim_var], matching_code)
- <- matchWrapper RecUpd (MG { mg_alts = alts, mg_arg_tys = [in_ty], mg_res_ty = out_ty })
+ <- matchWrapper RecUpd (MG { mg_alts = alts, mg_arg_tys = [in_ty], mg_res_ty = out_ty, mg_origin = Generated })
; return (add_field_binds field_binds' $
bindNonRec discrim_var record_expr' matching_code) }
@@ -548,7 +550,7 @@ dsExpr expr@(RecordUpd record_expr (HsRecFields { rec_flds = fields })
, pat_dicts = eqs_vars ++ theta_vars
, pat_binds = emptyTcEvBinds
, pat_args = PrefixCon $ map nlVarPat arg_ids
- , pat_ty = in_ty
+ , pat_arg_tys = in_inst_tys
, pat_wrap = idHsWrapper }
; let wrapped_rhs | null eq_spec = rhs
| otherwise = mkLHsWrap (mkWpCast (mkTcSubCo wrap_co)) rhs
@@ -789,7 +791,8 @@ dsDo stmts
rets = map noLoc rec_rets
mfix_app = nlHsApp (noLoc mfix_op) mfix_arg
mfix_arg = noLoc $ HsLam (MG { mg_alts = [mkSimpleMatch [mfix_pat] body]
- , mg_arg_tys = [tup_ty], mg_res_ty = body_ty })
+ , mg_arg_tys = [tup_ty], mg_res_ty = body_ty
+ , mg_origin = Generated })
mfix_pat = noLoc $ LazyPat $ mkBigLHsPatTup rec_tup_pats
body = noLoc $ HsDo DoExpr (rec_stmts ++ [ret_stmt]) body_ty
ret_app = nlHsApp (noLoc return_op) (mkBigLHsTup rets)
diff --git a/compiler/deSugar/DsForeign.lhs b/compiler/deSugar/DsForeign.lhs
index e2f4f4ff3cd3..c60e9146bca9 100644
--- a/compiler/deSugar/DsForeign.lhs
+++ b/compiler/deSugar/DsForeign.lhs
@@ -6,6 +6,8 @@
Desugaring foreign declarations (see also DsCCall).
\begin{code}
+{-# LANGUAGE CPP #-}
+
module DsForeign ( dsForeigns
, dsForeigns'
, dsFImport, dsCImport, dsFCall, dsPrimCall
@@ -222,9 +224,9 @@ dsFCall fn_id co fcall mDeclHeader = do
dflags <- getDynFlags
(fcall', cDoc) <-
case fcall of
- CCall (CCallSpec (StaticTarget cName mPackageId isFun) CApiConv safety) ->
+ CCall (CCallSpec (StaticTarget cName mPackageKey isFun) CApiConv safety) ->
do wrapperName <- mkWrapperName "ghc_wrapper" (unpackFS cName)
- let fcall' = CCall (CCallSpec (StaticTarget wrapperName mPackageId True) CApiConv safety)
+ let fcall' = CCall (CCallSpec (StaticTarget wrapperName mPackageKey True) CApiConv safety)
c = includes
$$ fun_proto <+> braces (cRet <> semi)
includes = vcat [ text "#include <" <> ftext h <> text ">"
diff --git a/compiler/deSugar/DsGRHSs.lhs b/compiler/deSugar/DsGRHSs.lhs
index 4573e54ce0af..a571e807d4bf 100644
--- a/compiler/deSugar/DsGRHSs.lhs
+++ b/compiler/deSugar/DsGRHSs.lhs
@@ -6,6 +6,8 @@
Matching guarded right-hand-sides (GRHSs)
\begin{code}
+{-# LANGUAGE CPP #-}
+
module DsGRHSs ( dsGuarded, dsGRHSs, dsGRHS ) where
#include "HsVersions.h"
@@ -61,10 +63,8 @@ dsGRHSs hs_ctx _ (GRHSs grhss binds) rhs_ty
= ASSERT( notNull grhss )
do { match_results <- mapM (dsGRHS hs_ctx rhs_ty) grhss
; let match_result1 = foldr1 combineMatchResults match_results
- match_result2 = adjustMatchResultDs
- (\e -> dsLocalBinds binds e)
- match_result1
- -- NB: nested dsLet inside matchResult
+ match_result2 = adjustMatchResultDs (dsLocalBinds binds) match_result1
+ -- NB: nested dsLet inside matchResult
; return match_result2 }
dsGRHS :: HsMatchContext Name -> Type -> LGRHS Id (LHsExpr Id) -> DsM MatchResult
diff --git a/compiler/deSugar/DsListComp.lhs b/compiler/deSugar/DsListComp.lhs
index a1131a812672..2111c95f8285 100644
--- a/compiler/deSugar/DsListComp.lhs
+++ b/compiler/deSugar/DsListComp.lhs
@@ -6,7 +6,7 @@
Desugaring list comprehensions, monad comprehensions and array comprehensions
\begin{code}
-{-# LANGUAGE NamedFieldPuns #-}
+{-# LANGUAGE CPP, NamedFieldPuns #-}
module DsListComp ( dsListComp, dsPArrComp, dsMonadComp ) where
diff --git a/compiler/deSugar/DsMeta.hs b/compiler/deSugar/DsMeta.hs
index 56fba1434f3b..28e6feffece2 100644
--- a/compiler/deSugar/DsMeta.hs
+++ b/compiler/deSugar/DsMeta.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 2006
@@ -277,7 +279,7 @@ repFamilyDecl (L loc (FamilyDecl { fdInfo = info,
fdKindSig = opt_kind }))
= do { tc1 <- lookupLOcc tc -- See note [Binders and occurrences]
; dec <- addTyClTyVarBinds tvs $ \bndrs ->
- case (opt_kind, info) of
+ case (opt_kind, info) of
(Nothing, ClosedTypeFamily eqns) ->
do { eqns1 <- mapM repTyFamEqn eqns
; eqns2 <- coreList tySynEqnQTyConName eqns1
@@ -286,13 +288,13 @@ repFamilyDecl (L loc (FamilyDecl { fdInfo = info,
do { eqns1 <- mapM repTyFamEqn eqns
; eqns2 <- coreList tySynEqnQTyConName eqns1
; ki1 <- repLKind ki
- ; repClosedFamilyKind tc1 bndrs ki1 eqns2 }
+ ; repClosedFamilyKind tc1 bndrs ki1 eqns2 }
(Nothing, _) ->
do { info' <- repFamilyInfo info
; repFamilyNoKind info' tc1 bndrs }
(Just ki, _) ->
do { info' <- repFamilyInfo info
- ; ki1 <- repLKind ki
+ ; ki1 <- repLKind ki
; repFamilyKind info' tc1 bndrs ki1 }
; return (loc, dec)
}
@@ -389,15 +391,15 @@ repClsInstD (ClsInstDecl { cid_poly_ty = ty, cid_binds = binds
repTyFamInstD :: TyFamInstDecl Name -> DsM (Core TH.DecQ)
repTyFamInstD decl@(TyFamInstDecl { tfid_eqn = eqn })
= do { let tc_name = tyFamInstDeclLName decl
- ; tc <- lookupLOcc tc_name -- See note [Binders and occurrences]
+ ; tc <- lookupLOcc tc_name -- See note [Binders and occurrences]
; eqn1 <- repTyFamEqn eqn
; repTySynInst tc eqn1 }
repTyFamEqn :: LTyFamInstEqn Name -> DsM (Core TH.TySynEqnQ)
-repTyFamEqn (L loc (TyFamInstEqn { tfie_pats = HsWB { hswb_cts = tys
- , hswb_kvs = kv_names
- , hswb_tvs = tv_names }
- , tfie_rhs = rhs }))
+repTyFamEqn (L loc (TyFamEqn { tfe_pats = HsWB { hswb_cts = tys
+ , hswb_kvs = kv_names
+ , hswb_tvs = tv_names }
+ , tfe_rhs = rhs }))
= do { let hs_tvs = HsQTvs { hsq_kvs = kv_names
, hsq_tvs = userHsTyVarBndrs loc tv_names } -- Yuk
; addTyClTyVarBinds hs_tvs $ \ _ ->
@@ -705,12 +707,14 @@ addTyVarBinds :: LHsTyVarBndrs Name -- the binders to
-- the computations passed as the second argument is executed in that extended
-- meta environment and gets the *new* names on Core-level as an argument
-addTyVarBinds tvs m
- = do { freshNames <- mkGenSyms (hsLKiTyVarNames tvs)
- ; term <- addBinds freshNames $
- do { kbs <- repList tyVarBndrTyConName mk_tv_bndr (hsQTvBndrs tvs `zip` freshNames)
+addTyVarBinds (HsQTvs { hsq_kvs = kvs, hsq_tvs = tvs }) m
+ = do { fresh_kv_names <- mkGenSyms kvs
+ ; fresh_tv_names <- mkGenSyms (map hsLTyVarName tvs)
+ ; let fresh_names = fresh_kv_names ++ fresh_tv_names
+ ; term <- addBinds fresh_names $
+ do { kbs <- repList tyVarBndrTyConName mk_tv_bndr (tvs `zip` fresh_tv_names)
; m kbs }
- ; wrapGenSyms freshNames term }
+ ; wrapGenSyms fresh_names term }
where
mk_tv_bndr (tv, (_,v)) = repTyVarBndrWithKind tv (coreVar v)
@@ -754,31 +758,9 @@ repLContext :: LHsContext Name -> DsM (Core TH.CxtQ)
repLContext (L _ ctxt) = repContext ctxt
repContext :: HsContext Name -> DsM (Core TH.CxtQ)
-repContext ctxt = do preds <- repList predQTyConName repLPred ctxt
+repContext ctxt = do preds <- repList typeQTyConName repLTy ctxt
repCtxt preds
--- represent a type predicate
---
-repLPred :: LHsType Name -> DsM (Core TH.PredQ)
-repLPred (L _ p) = repPred p
-
-repPred :: HsType Name -> DsM (Core TH.PredQ)
-repPred (HsParTy ty)
- = repLPred ty
-repPred ty
- | Just (cls, tys) <- splitHsClassTy_maybe ty
- = do
- cls1 <- lookupOcc cls
- tys1 <- repList typeQTyConName repLTy tys
- repClassP cls1 tys1
-repPred (HsEqTy tyleft tyright)
- = do
- tyleft1 <- repLTy tyleft
- tyright1 <- repLTy tyright
- repEqualP tyleft1 tyright1
-repPred ty
- = notHandled "Exotic predicate type" (ppr ty)
-
-- yield the representation of a list of types
--
repLTys :: [LHsType Name] -> DsM [Core TH.TypeQ]
@@ -833,6 +815,11 @@ repTy (HsTupleTy _ tys) = do tys1 <- repLTys tys
repTy (HsOpTy ty1 (_, n) ty2) = repLTy ((nlHsTyVar (unLoc n) `nlHsAppTy` ty1)
`nlHsAppTy` ty2)
repTy (HsParTy t) = repLTy t
+repTy (HsEqTy t1 t2) = do
+ t1' <- repLTy t1
+ t2' <- repLTy t2
+ eq <- repTequality
+ repTapps eq [t1', t2']
repTy (HsKindSig t k) = do
t1 <- repLTy t
k1 <- repLKind k
@@ -848,6 +835,7 @@ repTy (HsExplicitTupleTy _ tys) = do
repTy (HsTyLit lit) = do
lit' <- repTyLit lit
repTLit lit'
+
repTy ty = notHandled "Exotic form of type" (ppr ty)
repTyLit :: HsTyLit -> DsM (Core TH.TyLitQ)
@@ -1196,7 +1184,7 @@ rep_binds binds = do { binds_w_locs <- rep_binds' binds
; return (de_loc (sort_by_loc binds_w_locs)) }
rep_binds' :: LHsBinds Name -> DsM [(SrcSpan, Core TH.DecQ)]
-rep_binds' binds = mapM (rep_bind . snd) (bagToList binds)
+rep_binds' = mapM rep_bind . bagToList
rep_bind :: LHsBind Name -> DsM (SrcSpan, Core TH.DecQ)
-- Assumes: all the binders of the binding are alrady in the meta-env
@@ -1238,7 +1226,7 @@ rep_bind (L _ (VarBind { var_id = v, var_rhs = e}))
; return (srcLocSpan (getSrcLoc v), ans) }
rep_bind (L _ (AbsBinds {})) = panic "rep_bind: AbsBinds"
-rep_bind (L _ (PatSynBind {})) = panic "rep_bind: PatSynBind"
+rep_bind (L _ dec@(PatSynBind {})) = notHandled "pattern synonyms" (ppr dec)
-----------------------------------------------------------------------------
-- Since everything in a Bind is mutually recursive we need rename all
-- all the variables simultaneously. For example:
@@ -1428,7 +1416,7 @@ globalVar name
where
mod = ASSERT( isExternalName name) nameModule name
name_mod = moduleNameString (moduleName mod)
- name_pkg = packageIdString (modulePackageId mod)
+ name_pkg = packageKeyString (modulePackageKey mod)
name_occ = nameOccName name
mk_varg | OccName.isDataOcc name_occ = mkNameG_dName
| OccName.isVarOcc name_occ = mkNameG_vName
@@ -1488,7 +1476,7 @@ rep2 n xs = do { id <- dsLookupGlobalId n
dataCon' :: Name -> [CoreExpr] -> DsM (Core a)
dataCon' n args = do { id <- dsLookupDataCon n
- ; return $ MkC $ mkConApp id args }
+ ; return $ MkC $ mkCoreConApps id args }
dataCon :: Name -> DsM (Core a)
dataCon n = dataCon' n []
@@ -1772,12 +1760,6 @@ repProto (MkC s) (MkC ty) = rep2 sigDName [s, ty]
repCtxt :: Core [TH.PredQ] -> DsM (Core TH.CxtQ)
repCtxt (MkC tys) = rep2 cxtName [tys]
-repClassP :: Core TH.Name -> Core [TH.TypeQ] -> DsM (Core TH.PredQ)
-repClassP (MkC cla) (MkC tys) = rep2 classPName [cla, tys]
-
-repEqualP :: Core TH.TypeQ -> Core TH.TypeQ -> DsM (Core TH.PredQ)
-repEqualP (MkC ty1) (MkC ty2) = rep2 equalPName [ty1, ty2]
-
repConstr :: Core TH.Name -> HsConDeclDetails Name
-> DsM (Core TH.ConQ)
repConstr con (PrefixCon ps)
@@ -1816,6 +1798,9 @@ repTapps f (t:ts) = do { f1 <- repTapp f t; repTapps f1 ts }
repTSig :: Core TH.TypeQ -> Core TH.Kind -> DsM (Core TH.TypeQ)
repTSig (MkC ty) (MkC ki) = rep2 sigTName [ty, ki]
+repTequality :: DsM (Core TH.TypeQ)
+repTequality = rep2 equalityTName []
+
repTPromotedList :: [Core TH.TypeQ] -> DsM (Core TH.TypeQ)
repTPromotedList [] = repPromotedNilTyCon
repTPromotedList (t:ts) = do { tcon <- repPromotedConsTyCon
@@ -2069,8 +2054,6 @@ templateHaskellNames = [
roleAnnotDName,
-- Cxt
cxtName,
- -- Pred
- classPName, equalPName,
-- Strict
isStrictName, notStrictName, unpackedName,
-- Con
@@ -2080,7 +2063,7 @@ templateHaskellNames = [
-- VarStrictType
varStrictTypeName,
-- Type
- forallTName, varTName, conTName, appTName,
+ forallTName, varTName, conTName, appTName, equalityTName,
tupleTName, unboxedTupleTName, arrowTName, listTName, sigTName, litTName,
promotedTName, promotedTupleTName, promotedNilTName, promotedConsTName,
-- TyLit
@@ -2134,7 +2117,7 @@ thLib = mkTHModule (fsLit "Language.Haskell.TH.Lib")
qqLib = mkTHModule (fsLit "Language.Haskell.TH.Quote")
mkTHModule :: FastString -> Module
-mkTHModule m = mkModule thPackageId (mkModuleNameFS m)
+mkTHModule m = mkModule thPackageKey (mkModuleNameFS m)
libFun, libTc, thFun, thTc, thCon, qqFun :: FastString -> Unique -> Name
libFun = mk_known_key_name OccName.varName thLib
@@ -2323,11 +2306,6 @@ roleAnnotDName = libFun (fsLit "roleAnnotD") roleAnnotDIdKey
cxtName :: Name
cxtName = libFun (fsLit "cxt") cxtIdKey
--- data Pred = ...
-classPName, equalPName :: Name
-classPName = libFun (fsLit "classP") classPIdKey
-equalPName = libFun (fsLit "equalP") equalPIdKey
-
-- data Strict = ...
isStrictName, notStrictName, unpackedName :: Name
isStrictName = libFun (fsLit "isStrict") isStrictKey
@@ -2351,7 +2329,7 @@ varStrictTypeName = libFun (fsLit "varStrictType") varStrictTKey
-- data Type = ...
forallTName, varTName, conTName, tupleTName, unboxedTupleTName, arrowTName,
- listTName, appTName, sigTName, litTName,
+ listTName, appTName, sigTName, equalityTName, litTName,
promotedTName, promotedTupleTName,
promotedNilTName, promotedConsTName :: Name
forallTName = libFun (fsLit "forallT") forallTIdKey
@@ -2363,6 +2341,7 @@ arrowTName = libFun (fsLit "arrowT") arrowTIdKey
listTName = libFun (fsLit "listT") listTIdKey
appTName = libFun (fsLit "appT") appTIdKey
sigTName = libFun (fsLit "sigT") sigTIdKey
+equalityTName = libFun (fsLit "equalityT") equalityTIdKey
litTName = libFun (fsLit "litT") litTIdKey
promotedTName = libFun (fsLit "promotedT") promotedTIdKey
promotedTupleTName = libFun (fsLit "promotedTupleT") promotedTupleTIdKey
@@ -2681,11 +2660,6 @@ roleAnnotDIdKey = mkPreludeMiscIdUnique 352
cxtIdKey :: Unique
cxtIdKey = mkPreludeMiscIdUnique 360
--- data Pred = ...
-classPIdKey, equalPIdKey :: Unique
-classPIdKey = mkPreludeMiscIdUnique 361
-equalPIdKey = mkPreludeMiscIdUnique 362
-
-- data Strict = ...
isStrictKey, notStrictKey, unpackedKey :: Unique
isStrictKey = mkPreludeMiscIdUnique 363
@@ -2709,7 +2683,7 @@ varStrictTKey = mkPreludeMiscIdUnique 375
-- data Type = ...
forallTIdKey, varTIdKey, conTIdKey, tupleTIdKey, unboxedTupleTIdKey, arrowTIdKey,
- listTIdKey, appTIdKey, sigTIdKey, litTIdKey,
+ listTIdKey, appTIdKey, sigTIdKey, equalityTIdKey, litTIdKey,
promotedTIdKey, promotedTupleTIdKey,
promotedNilTIdKey, promotedConsTIdKey :: Unique
forallTIdKey = mkPreludeMiscIdUnique 380
@@ -2721,21 +2695,22 @@ arrowTIdKey = mkPreludeMiscIdUnique 385
listTIdKey = mkPreludeMiscIdUnique 386
appTIdKey = mkPreludeMiscIdUnique 387
sigTIdKey = mkPreludeMiscIdUnique 388
-litTIdKey = mkPreludeMiscIdUnique 389
-promotedTIdKey = mkPreludeMiscIdUnique 390
-promotedTupleTIdKey = mkPreludeMiscIdUnique 391
-promotedNilTIdKey = mkPreludeMiscIdUnique 392
-promotedConsTIdKey = mkPreludeMiscIdUnique 393
+equalityTIdKey = mkPreludeMiscIdUnique 389
+litTIdKey = mkPreludeMiscIdUnique 390
+promotedTIdKey = mkPreludeMiscIdUnique 391
+promotedTupleTIdKey = mkPreludeMiscIdUnique 392
+promotedNilTIdKey = mkPreludeMiscIdUnique 393
+promotedConsTIdKey = mkPreludeMiscIdUnique 394
-- data TyLit = ...
numTyLitIdKey, strTyLitIdKey :: Unique
-numTyLitIdKey = mkPreludeMiscIdUnique 394
-strTyLitIdKey = mkPreludeMiscIdUnique 395
+numTyLitIdKey = mkPreludeMiscIdUnique 395
+strTyLitIdKey = mkPreludeMiscIdUnique 396
-- data TyVarBndr = ...
plainTVIdKey, kindedTVIdKey :: Unique
-plainTVIdKey = mkPreludeMiscIdUnique 396
-kindedTVIdKey = mkPreludeMiscIdUnique 397
+plainTVIdKey = mkPreludeMiscIdUnique 397
+kindedTVIdKey = mkPreludeMiscIdUnique 398
-- data Role = ...
nominalRIdKey, representationalRIdKey, phantomRIdKey, inferRIdKey :: Unique
diff --git a/compiler/deSugar/DsMonad.lhs b/compiler/deSugar/DsMonad.lhs
index b590f4b2d29d..c017a7cc0191 100644
--- a/compiler/deSugar/DsMonad.lhs
+++ b/compiler/deSugar/DsMonad.lhs
@@ -6,6 +6,8 @@
@DsMonad@: monadery used in desugaring
\begin{code}
+{-# LANGUAGE FlexibleInstances #-}
+
module DsMonad (
DsM, mapM, mapAndUnzipM,
initDs, initDsTc, fixDs,
diff --git a/compiler/deSugar/DsUtils.lhs b/compiler/deSugar/DsUtils.lhs
index 2ad70c67d360..c52b917efd2e 100644
--- a/compiler/deSugar/DsUtils.lhs
+++ b/compiler/deSugar/DsUtils.lhs
@@ -8,7 +8,8 @@ Utilities for desugaring
This module exports some utility functions of no great interest.
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -64,7 +65,6 @@ import ConLike
import DataCon
import PatSyn
import Type
-import Coercion
import TysPrim
import TysWiredIn
import BasicTypes
@@ -638,12 +638,13 @@ mkSelectorBinds ticks pat val_expr
-- efficient too.
-- For the error message we make one error-app, to avoid duplication.
- -- But we need it at different types... so we use coerce for that
- ; err_expr <- mkErrorAppDs iRREFUT_PAT_ERROR_ID unitTy (ppr pat)
- ; err_var <- newSysLocalDs unitTy
- ; binds <- zipWithM (mk_bind val_var err_var) ticks' binders
- ; return ( (val_var, val_expr) :
- (err_var, err_expr) :
+ -- But we need it at different types, so we make it polymorphic:
+ -- err_var = /\a. iRREFUT_PAT_ERR a "blah blah blah"
+ ; err_app <- mkErrorAppDs iRREFUT_PAT_ERROR_ID alphaTy (ppr pat)
+ ; err_var <- newSysLocalDs (mkForAllTy alphaTyVar alphaTy)
+ ; binds <- zipWithM (mk_bind val_var err_var) ticks' binders
+ ; return ( (val_var, val_expr) :
+ (err_var, Lam alphaTyVar err_app) :
binds ) }
| otherwise
@@ -665,14 +666,13 @@ mkSelectorBinds ticks pat val_expr
mk_bind scrut_var err_var tick bndr_var = do
-- (mk_bind sv err_var) generates
- -- bv = case sv of { pat -> bv; other -> coerce (type-of-bv) err_var }
+ -- bv = case sv of { pat -> bv; other -> err_var @ type-of-bv }
-- Remember, pat binds bv
rhs_expr <- matchSimply (Var scrut_var) PatBindRhs pat
(Var bndr_var) error_expr
return (bndr_var, mkOptTickBox tick rhs_expr)
where
- error_expr = mkCast (Var err_var) co
- co = mkUnsafeCo (exprType (Var err_var)) (idType bndr_var)
+ error_expr = Var err_var `App` Type (idType bndr_var)
is_simple_lpat p = is_simple_pat (unLoc p)
@@ -709,8 +709,7 @@ mkLHsVarPatTup bs = mkLHsPatTup (map nlVarPat bs)
mkVanillaTuplePat :: [OutPat Id] -> Boxity -> Pat Id
-- A vanilla tuple pattern simply gets its type from its sub-patterns
-mkVanillaTuplePat pats box
- = TuplePat pats box (mkTupleTy (boxityNormalTupleSort box) (map hsLPatType pats))
+mkVanillaTuplePat pats box = TuplePat pats box (map hsLPatType pats)
-- The Big equivalents for the source tuple expressions
mkBigLHsVarTup :: [Id] -> LHsExpr Id
diff --git a/compiler/deSugar/Match.lhs b/compiler/deSugar/Match.lhs
index 0433d873d55a..a14027862a70 100644
--- a/compiler/deSugar/Match.lhs
+++ b/compiler/deSugar/Match.lhs
@@ -6,6 +6,8 @@
The @match@ function
\begin{code}
+{-# LANGUAGE CPP #-}
+
module Match ( match, matchEquations, matchWrapper, matchSimply, matchSinglePat ) where
#include "HsVersions.h"
@@ -40,7 +42,7 @@ import Maybes
import Util
import Name
import Outputable
-import BasicTypes ( boxityNormalTupleSort )
+import BasicTypes ( boxityNormalTupleSort, isGenerated )
import FastString
import Control.Monad( when )
@@ -552,9 +554,8 @@ tidy1 v (LazyPat pat)
tidy1 _ (ListPat pats ty Nothing)
= return (idDsWrapper, unLoc list_ConPat)
where
- list_ty = mkListTy ty
- list_ConPat = foldr (\ x y -> mkPrefixConPat consDataCon [x, y] list_ty)
- (mkNilPat list_ty)
+ list_ConPat = foldr (\ x y -> mkPrefixConPat consDataCon [x, y] [ty])
+ (mkNilPat ty)
pats
-- Introduce fake parallel array constructors to be able to handle parallel
@@ -563,13 +564,13 @@ tidy1 _ (PArrPat pats ty)
= return (idDsWrapper, unLoc parrConPat)
where
arity = length pats
- parrConPat = mkPrefixConPat (parrFakeCon arity) pats (mkPArrTy ty)
+ parrConPat = mkPrefixConPat (parrFakeCon arity) pats [ty]
-tidy1 _ (TuplePat pats boxity ty)
+tidy1 _ (TuplePat pats boxity tys)
= return (idDsWrapper, unLoc tuple_ConPat)
where
arity = length pats
- tuple_ConPat = mkPrefixConPat (tupleCon (boxityNormalTupleSort boxity) arity) pats ty
+ tuple_ConPat = mkPrefixConPat (tupleCon (boxityNormalTupleSort boxity) arity) pats tys
-- LitPats: we *might* be able to replace these w/ a simpler form
tidy1 _ (LitPat lit)
@@ -586,8 +587,6 @@ tidy1 _ non_interesting_pat
--------------------
tidy_bang_pat :: Id -> SrcSpan -> Pat Id -> DsM (DsWrapper, Pat Id)
--- BangPatterns: Pattern matching is already strict in constructors,
--- tuples etc, so the last case strips off the bang for those patterns.
-- Discard bang around strict pattern
tidy_bang_pat v _ p@(ListPat {}) = tidy1 v p
@@ -596,8 +595,7 @@ tidy_bang_pat v _ p@(PArrPat {}) = tidy1 v p
tidy_bang_pat v _ p@(ConPatOut {}) = tidy1 v p
tidy_bang_pat v _ p@(LitPat {}) = tidy1 v p
--- Discard lazy/par/sig under a bang
-tidy_bang_pat v _ (LazyPat (L l p)) = tidy_bang_pat v l p
+-- Discard par/sig under a bang
tidy_bang_pat v _ (ParPat (L l p)) = tidy_bang_pat v l p
tidy_bang_pat v _ (SigPatOut (L l p) _) = tidy_bang_pat v l p
@@ -607,7 +605,10 @@ tidy_bang_pat v l (AsPat v' p) = tidy1 v (AsPat v' (L l (BangPat p)))
tidy_bang_pat v l (CoPat w p t) = tidy1 v (CoPat w (BangPat (L l p)) t)
-- Default case, leave the bang there:
--- VarPat, WildPat, ViewPat, NPat, NPlusKPat
+-- VarPat, LazyPat, WildPat, ViewPat, NPat, NPlusKPat
+-- For LazyPat, remember that it's semantically like a VarPat
+-- i.e. !(~p) is not like ~p, or p! (Trac #8952)
+
tidy_bang_pat _ l p = return (idDsWrapper, BangPat (L l p))
-- NB: SigPatIn, ConPatIn should not happen
\end{code}
@@ -752,12 +753,14 @@ JJQC 30-Nov-1997
\begin{code}
matchWrapper ctxt (MG { mg_alts = matches
, mg_arg_tys = arg_tys
- , mg_res_ty = rhs_ty })
+ , mg_res_ty = rhs_ty
+ , mg_origin = origin })
= do { eqns_info <- mapM mk_eqn_info matches
; new_vars <- case matches of
[] -> mapM newSysLocalDs arg_tys
(m:_) -> selectMatchVars (map unLoc (hsLMatchPats m))
- ; result_expr <- matchEquations ctxt new_vars eqns_info rhs_ty
+ ; result_expr <- handleWarnings $
+ matchEquations ctxt new_vars eqns_info rhs_ty
; return (new_vars, result_expr) }
where
mk_eqn_info (L _ (Match pats _ grhss))
@@ -765,6 +768,10 @@ matchWrapper ctxt (MG { mg_alts = matches
; match_result <- dsGRHSs ctxt upats grhss rhs_ty
; return (EqnInfo { eqn_pats = upats, eqn_rhs = match_result}) }
+ handleWarnings = if isGenerated origin
+ then discardWarningsDs
+ else id
+
matchEquations :: HsMatchContext Name
-> [Id] -> [EquationInfo] -> Type
diff --git a/compiler/deSugar/MatchCon.lhs b/compiler/deSugar/MatchCon.lhs
index 2b51638bf36b..8e581f66e21a 100644
--- a/compiler/deSugar/MatchCon.lhs
+++ b/compiler/deSugar/MatchCon.lhs
@@ -6,7 +6,8 @@
Pattern-matching constructors
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -124,7 +125,7 @@ matchOneConLike :: [Id]
-> [EquationInfo]
-> DsM (CaseAlt ConLike)
matchOneConLike vars ty (eqn1 : eqns) -- All eqns for a single constructor
- = do { arg_vars <- selectConMatchVars arg_tys args1
+ = do { arg_vars <- selectConMatchVars val_arg_tys args1
-- Use the first equation as a source of
-- suggestions for the new variables
@@ -140,27 +141,24 @@ matchOneConLike vars ty (eqn1 : eqns) -- All eqns for a single constructor
alt_wrapper = wrapper1,
alt_result = foldr1 combineMatchResults match_results } }
where
- ConPatOut { pat_con = L _ con1, pat_ty = pat_ty1, pat_wrap = wrapper1,
+ ConPatOut { pat_con = L _ con1, pat_arg_tys = arg_tys, pat_wrap = wrapper1,
pat_tvs = tvs1, pat_dicts = dicts1, pat_args = args1 }
= firstPat eqn1
fields1 = case con1 of
- RealDataCon dcon1 -> dataConFieldLabels dcon1
- PatSynCon{} -> []
-
- arg_tys = inst inst_tys
- where
- inst = case con1 of
- RealDataCon dcon1 -> dataConInstOrigArgTys dcon1
- PatSynCon psyn1 -> patSynInstArgTys psyn1
- inst_tys = tcTyConAppArgs pat_ty1 ++
- mkTyVarTys (takeList exVars tvs1)
- -- Newtypes opaque, hence tcTyConAppArgs
+ RealDataCon dcon1 -> dataConFieldLabels dcon1
+ PatSynCon{} -> []
+
+ val_arg_tys = case con1 of
+ RealDataCon dcon1 -> dataConInstOrigArgTys dcon1 inst_tys
+ PatSynCon psyn1 -> patSynInstArgTys psyn1 inst_tys
+ inst_tys = ASSERT( tvs1 `equalLength` ex_tvs )
+ arg_tys ++ mkTyVarTys tvs1
-- dataConInstOrigArgTys takes the univ and existential tyvars
-- and returns the types of the *value* args, which is what we want
- where
- exVars = case con1 of
- RealDataCon dcon1 -> dataConExTyVars dcon1
- PatSynCon psyn1 -> patSynExTyVars psyn1
+
+ ex_tvs = case con1 of
+ RealDataCon dcon1 -> dataConExTyVars dcon1
+ PatSynCon psyn1 -> patSynExTyVars psyn1
match_group :: [Id] -> [(ConArgPats, EquationInfo)] -> DsM MatchResult
-- All members of the group have compatible ConArgPats
@@ -178,7 +176,7 @@ matchOneConLike vars ty (eqn1 : eqns) -- All eqns for a single constructor
return ( wrapBinds (tvs `zip` tvs1)
. wrapBinds (ds `zip` dicts1)
. mkCoreLets ds_bind
- , eqn { eqn_pats = conArgPats arg_tys args ++ pats }
+ , eqn { eqn_pats = conArgPats val_arg_tys args ++ pats }
)
shift (_, (EqnInfo { eqn_pats = ps })) = pprPanic "matchOneCon/shift" (ppr ps)
diff --git a/compiler/deSugar/MatchLit.lhs b/compiler/deSugar/MatchLit.lhs
index 7429a613d989..71a5e10636f6 100644
--- a/compiler/deSugar/MatchLit.lhs
+++ b/compiler/deSugar/MatchLit.lhs
@@ -6,6 +6,8 @@
Pattern-matching literal patterns
\begin{code}
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
+
module MatchLit ( dsLit, dsOverLit, hsLitKey, hsOverLitKey
, tidyLitPat, tidyNPat
, matchLiterals, matchNPlusKPats, matchNPats
@@ -90,7 +92,7 @@ dsLit (HsInt i) = do dflags <- getDynFlags
dsLit (HsRat r ty) = do
num <- mkIntegerExpr (numerator (fl_value r))
denom <- mkIntegerExpr (denominator (fl_value r))
- return (mkConApp ratio_data_con [Type integer_ty, num, denom])
+ return (mkCoreConApps ratio_data_con [Type integer_ty, num, denom])
where
(ratio_data_con, integer_ty)
= case tcSplitTyConApp ty of
@@ -264,8 +266,8 @@ tidyLitPat :: HsLit -> Pat Id
tidyLitPat (HsChar c) = unLoc (mkCharLitPat c)
tidyLitPat (HsString s)
| lengthFS s <= 1 -- Short string literals only
- = unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon [mkCharLitPat c, pat] stringTy)
- (mkNilPat stringTy) (unpackFS s)
+ = unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon [mkCharLitPat c, pat] [charTy])
+ (mkNilPat charTy) (unpackFS s)
-- The stringTy is the type of the whole pattern, not
-- the type to instantiate (:) or [] with!
tidyLitPat lit = LitPat lit
@@ -297,7 +299,7 @@ tidyNPat tidy_lit_pat (OverLit val False _ ty) mb_neg _
| isStringTy ty, Just str_lit <- mb_str_lit = tidy_lit_pat (HsString str_lit)
where
mk_con_pat :: DataCon -> HsLit -> Pat Id
- mk_con_pat con lit = unLoc (mkPrefixConPat con [noLoc $ LitPat lit] ty)
+ mk_con_pat con lit = unLoc (mkPrefixConPat con [noLoc $ LitPat lit] [])
mb_int_lit :: Maybe Integer
mb_int_lit = case (mb_neg, val) of
diff --git a/compiler/ghc.cabal.in b/compiler/ghc.cabal.in
index a5d9785a4307..5281b11b91d7 100644
--- a/compiler/ghc.cabal.in
+++ b/compiler/ghc.cabal.in
@@ -18,7 +18,7 @@ Description:
through this package.
Category: Development
Build-Type: Simple
-Cabal-Version: >= 1.2.3
+Cabal-Version: >=1.10
Flag ghci
Description: Build GHCi support.
@@ -41,6 +41,7 @@ Flag stage3
Manual: True
Library
+ Default-Language: Haskell2010
Exposed: False
Build-Depends: base >= 4 && < 5,
@@ -53,7 +54,9 @@ Library
filepath >= 1 && < 1.4,
Cabal,
hpc,
- transformers
+ transformers,
+ bin-package-db,
+ hoopl
if flag(stage1) && impl(ghc < 7.5)
Build-Depends: old-time >= 1 && < 1.2
@@ -70,19 +73,44 @@ Library
CPP-Options: -DGHCI
Include-Dirs: ../rts/dist/build @FFIIncludeDir@
- Build-Depends: bin-package-db
- Build-Depends: hoopl
-
- Extensions: CPP, MagicHash, UnboxedTuples, PatternGuards,
- ForeignFunctionInterface, EmptyDataDecls,
- TypeSynonymInstances, MultiParamTypeClasses,
- FlexibleInstances, RankNTypes, ScopedTypeVariables,
- DeriveDataTypeable, BangPatterns
- if impl(ghc >= 7.1)
- Extensions: NondecreasingIndentation
+ Other-Extensions:
+ BangPatterns
+ CPP
+ DataKinds
+ DeriveDataTypeable
+ DeriveFoldable
+ DeriveFunctor
+ DeriveTraversable
+ DisambiguateRecordFields
+ ExplicitForAll
+ FlexibleContexts
+ FlexibleInstances
+ GADTs
+ GeneralizedNewtypeDeriving
+ MagicHash
+ MultiParamTypeClasses
+ NamedFieldPuns
+ NondecreasingIndentation
+ RankNTypes
+ RecordWildCards
+ ScopedTypeVariables
+ StandaloneDeriving
+ Trustworthy
+ TupleSections
+ TypeFamilies
+ TypeSynonymInstances
+ UnboxedTuples
+ UndecidableInstances
Include-Dirs: . parser utils
+ if impl( ghc >= 7.9 )
+ -- We need to set the package key to ghc (without a version number)
+ -- as it's magic. But we can't set it for old versions of GHC (e.g.
+ -- when bootstrapping) because those versions of GHC don't understand
+ -- that GHC is wired-in.
+ GHC-Options: -this-package-key ghc
+
if flag(stage1)
Include-Dirs: stage1
else
@@ -96,8 +124,6 @@ Library
c-sources:
parser/cutils.c
-
- c-sources:
ghci/keepCAFsForGHCi.c
cbits/genSym.c
@@ -134,6 +160,7 @@ Library
DataCon
PatSyn
Demand
+ Debug
Exception
GhcMonad
Hooks
@@ -165,6 +192,7 @@ Library
Var
VarEnv
VarSet
+ UnVarGraph
BlockId
CLabel
Cmm
@@ -183,7 +211,6 @@ Library
CmmOpt
CmmParse
CmmProcPoint
- CmmRewriteAssignments
CmmSink
CmmType
CmmUtils
@@ -232,11 +259,8 @@ Library
CoreTidy
CoreUnfold
CoreUtils
- ExternalCore
MkCore
- MkExternalCore
PprCore
- PprExternalCore
Check
Coverage
Desugar
@@ -303,12 +327,9 @@ Library
TidyPgm
Ctype
HaddockUtils
- LexCore
Lexer
OptCoercion
Parser
- ParserCore
- ParserCoreUtils
RdrHsSyn
ForeignCall
PrelInfo
@@ -351,6 +372,7 @@ Library
CoreToStg
StgLint
StgSyn
+ CallArity
DmdAnal
WorkWrap
WwLib
@@ -532,6 +554,9 @@ Library
RegAlloc.Linear.X86_64.FreeRegs
RegAlloc.Linear.PPC.FreeRegs
RegAlloc.Linear.SPARC.FreeRegs
+ Dwarf
+ Dwarf.Types
+ Dwarf.Constants
if flag(ghci)
Exposed-Modules:
diff --git a/compiler/ghc.mk b/compiler/ghc.mk
index 0a1871307e57..d23d1fe5b607 100644
--- a/compiler/ghc.mk
+++ b/compiler/ghc.mk
@@ -99,8 +99,6 @@ endif
@echo 'cGhcEnableTablesNextToCode = "$(GhcEnableTablesNextToCode)"' >> $@
@echo 'cLeadingUnderscore :: String' >> $@
@echo 'cLeadingUnderscore = "$(LeadingUnderscore)"' >> $@
- @echo 'cRAWCPP_FLAGS :: String' >> $@
- @echo 'cRAWCPP_FLAGS = "$(RAWCPP_FLAGS)"' >> $@
@echo 'cGHC_UNLIT_PGM :: String' >> $@
@echo 'cGHC_UNLIT_PGM = "$(utils/unlit_dist_PROG)"' >> $@
@echo 'cGHC_SPLIT_PGM :: String' >> $@
@@ -353,6 +351,11 @@ else
compiler_CONFIGURE_OPTS += --ghc-option=-DNO_REGS
endif
+ifneq "$(GhcWithSMP)" "YES"
+compiler_CONFIGURE_OPTS += --ghc-option=-DNOSMP
+compiler_CONFIGURE_OPTS += --ghc-option=-optc-DNOSMP
+endif
+
# Careful optimisation of the parser: we don't want to throw everything
# at it, because that takes too long and doesn't buy much, but we do want
# to inline certain key external functions, so we instruct GHC not to
@@ -434,8 +437,14 @@ ifeq "$(compiler_stage1_VERSION_MUNGED)" "YES"
compiler_stage1_MUNGED_VERSION = $(subst .$(ProjectPatchLevel),,$(ProjectVersion))
define compiler_PACKAGE_MAGIC
compiler_stage1_VERSION = $(compiler_stage1_MUNGED_VERSION)
+compiler_stage1_PACKAGE_KEY = $(subst .$(ProjectPatchLevel),,$(compiler_stage1_PACKAGE_KEY))
endef
+# NB: the PACKAGE_KEY munging has no effect for new-style package keys
+# (which indeed, have nothing version like in them, but are important for
+# old-style package keys which do.) The subst operation is idempotent, so
+# as long as we do it at least once we should be good.
+
# Don't register the non-munged package
compiler_stage1_REGISTER_PACKAGE = NO
@@ -662,9 +671,9 @@ compiler_stage2_CONFIGURE_OPTS += --disable-library-for-ghci
compiler_stage3_CONFIGURE_OPTS += --disable-library-for-ghci
# after build-package, because that sets compiler_stage1_HC_OPTS:
-compiler_stage1_HC_OPTS += $(GhcStage1HcOpts)
-compiler_stage2_HC_OPTS += $(GhcStage2HcOpts)
-compiler_stage3_HC_OPTS += $(GhcStage3HcOpts)
+compiler_stage1_HC_OPTS += $(GhcHcOpts) $(GhcStage1HcOpts)
+compiler_stage2_HC_OPTS += $(GhcHcOpts) $(GhcStage2HcOpts)
+compiler_stage3_HC_OPTS += $(GhcHcOpts) $(GhcStage3HcOpts)
ifneq "$(BINDIST)" "YES"
diff --git a/compiler/ghci/ByteCodeAsm.lhs b/compiler/ghci/ByteCodeAsm.lhs
index 9ec783a40deb..52d6adde863d 100644
--- a/compiler/ghci/ByteCodeAsm.lhs
+++ b/compiler/ghci/ByteCodeAsm.lhs
@@ -5,8 +5,8 @@
ByteCodeLink: Bytecode assembler and linker
\begin{code}
-{-# OPTIONS -optc-DNON_POSIX_SOURCE #-}
-{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE BangPatterns, CPP, MagicHash #-}
+{-# OPTIONS_GHC -optc-DNON_POSIX_SOURCE #-}
module ByteCodeAsm (
assembleBCOs, assembleBCO,
diff --git a/compiler/ghci/ByteCodeGen.lhs b/compiler/ghci/ByteCodeGen.lhs
index 58612e2e4898..645a0d8118d1 100644
--- a/compiler/ghci/ByteCodeGen.lhs
+++ b/compiler/ghci/ByteCodeGen.lhs
@@ -5,13 +5,7 @@
ByteCodeGen: Generate bytecode from Core
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
+{-# LANGUAGE CPP, MagicHash #-}
module ByteCodeGen ( UnlinkedBCO, byteCodeGen, coreExprToBCOs ) where
#include "HsVersions.h"
@@ -277,7 +271,7 @@ collect :: AnnExpr Id VarSet -> ([Var], AnnExpr' Id VarSet)
collect (_, e) = go [] e
where
go xs e | Just e' <- bcView e = go xs e'
- go xs (AnnLam x (_,e))
+ go xs (AnnLam x (_,e))
| UbxTupleRep _ <- repType (idType x)
= unboxedTupleException
| otherwise
@@ -819,8 +813,8 @@ doCase d s p (_,scrut) bndr alts is_unboxed_tuple
MASSERT(isAlgCase)
rhs_code <- schemeE (d_alts + size) s p' rhs
return (my_discr alt, unitOL (UNPACK (trunc16 size)) `appOL` rhs_code)
- where
- real_bndrs = filterOut isTyVar bndrs
+ where
+ real_bndrs = filterOut isTyVar bndrs
my_discr (DEFAULT, _, _) = NoDiscr {-shouldn't really happen-}
my_discr (DataAlt dc, _, _)
@@ -933,6 +927,11 @@ generateCCall d0 s p (CCallSpec target cconv safety) fn args_r_to_l
code <- parg_ArrayishRep (fromIntegral (arrPtrsHdrSize dflags)) d p a
return ((code,AddrRep):rest)
+ | t == smallArrayPrimTyCon || t == smallMutableArrayPrimTyCon
+ -> do rest <- pargs (d + fromIntegral addr_sizeW) az
+ code <- parg_ArrayishRep (fromIntegral (smallArrPtrsHdrSize dflags)) d p a
+ return ((code,AddrRep):rest)
+
| t == byteArrayPrimTyCon || t == mutableByteArrayPrimTyCon
-> do rest <- pargs (d + fromIntegral addr_sizeW) az
code <- parg_ArrayishRep (fromIntegral (arrWordsHdrSize dflags)) d p a
@@ -1247,8 +1246,8 @@ pushAtom d p e
| Just e' <- bcView e
= pushAtom d p e'
-pushAtom _ _ (AnnCoercion {}) -- Coercions are zero-width things,
- = return (nilOL, 0) -- treated just like a variable V
+pushAtom _ _ (AnnCoercion {}) -- Coercions are zero-width things,
+ = return (nilOL, 0) -- treated just like a variable V
pushAtom d p (AnnVar v)
| UnaryRep rep_ty <- repType (idType v)
@@ -1558,12 +1557,12 @@ isVAtom :: AnnExpr' Var ann -> Bool
isVAtom e | Just e' <- bcView e = isVAtom e'
isVAtom (AnnVar v) = isVoidArg (bcIdArgRep v)
isVAtom (AnnCoercion {}) = True
-isVAtom _ = False
+isVAtom _ = False
atomPrimRep :: AnnExpr' Id ann -> PrimRep
atomPrimRep e | Just e' <- bcView e = atomPrimRep e'
-atomPrimRep (AnnVar v) = bcIdPrimRep v
-atomPrimRep (AnnLit l) = typePrimRep (literalType l)
+atomPrimRep (AnnVar v) = bcIdPrimRep v
+atomPrimRep (AnnLit l) = typePrimRep (literalType l)
atomPrimRep (AnnCoercion {}) = VoidRep
atomPrimRep other = pprPanic "atomPrimRep" (ppr (deAnnotate (undefined,other)))
diff --git a/compiler/ghci/ByteCodeInstr.lhs b/compiler/ghci/ByteCodeInstr.lhs
index 005a430cd993..5535d58453f9 100644
--- a/compiler/ghci/ByteCodeInstr.lhs
+++ b/compiler/ghci/ByteCodeInstr.lhs
@@ -4,23 +4,16 @@
ByteCodeInstrs: Bytecode instruction definitions
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
+{-# LANGUAGE CPP, MagicHash #-}
{-# OPTIONS_GHC -funbox-strict-fields #-}
-
-module ByteCodeInstr (
- BCInstr(..), ProtoBCO(..), bciStackUse, BreakInfo (..)
+module ByteCodeInstr (
+ BCInstr(..), ProtoBCO(..), bciStackUse, BreakInfo (..)
) where
#include "HsVersions.h"
#include "../includes/MachDeps.h"
-import ByteCodeItbls ( ItblPtr )
+import ByteCodeItbls ( ItblPtr )
import StgCmmLayout ( ArgRep(..) )
import PprCore
@@ -43,17 +36,17 @@ import Data.Word
-- ----------------------------------------------------------------------------
-- Bytecode instructions
-data ProtoBCO a
- = ProtoBCO {
- protoBCOName :: a, -- name, in some sense
- protoBCOInstrs :: [BCInstr], -- instrs
- -- arity and GC info
- protoBCOBitmap :: [StgWord],
- protoBCOBitmapSize :: Word16,
- protoBCOArity :: Int,
- -- what the BCO came from
- protoBCOExpr :: Either [AnnAlt Id VarSet] (AnnExpr Id VarSet),
- -- malloc'd pointers
+data ProtoBCO a
+ = ProtoBCO {
+ protoBCOName :: a, -- name, in some sense
+ protoBCOInstrs :: [BCInstr], -- instrs
+ -- arity and GC info
+ protoBCOBitmap :: [StgWord],
+ protoBCOBitmapSize :: Word16,
+ protoBCOArity :: Int,
+ -- what the BCO came from
+ protoBCOExpr :: Either [AnnAlt Id VarSet] (AnnExpr Id VarSet),
+ -- malloc'd pointers
protoBCOPtrs :: [Either ItblPtr (Ptr ())]
}
@@ -79,14 +72,14 @@ data BCInstr
-- Pushing literals
| PUSH_UBX (Either Literal (Ptr ())) Word16
- -- push this int/float/double/addr, on the stack. Word16
- -- is # of words to copy from literal pool. Eitherness reflects
- -- the difficulty of dealing with MachAddr here, mostly due to
- -- the excessive (and unnecessary) restrictions imposed by the
- -- designers of the new Foreign library. In particular it is
- -- quite impossible to convert an Addr to any other integral
- -- type, and it appears impossible to get hold of the bits of
- -- an addr, even though we need to assemble BCOs.
+ -- push this int/float/double/addr, on the stack. Word16
+ -- is # of words to copy from literal pool. Eitherness reflects
+ -- the difficulty of dealing with MachAddr here, mostly due to
+ -- the excessive (and unnecessary) restrictions imposed by the
+ -- designers of the new Foreign library. In particular it is
+ -- quite impossible to convert an Addr to any other integral
+ -- type, and it appears impossible to get hold of the bits of
+ -- an addr, even though we need to assemble BCOs.
-- various kinds of application
| PUSH_APPLY_N
@@ -111,8 +104,8 @@ data BCInstr
| MKPAP !Word16{-ptr to PAP is this far down stack-} !Word16{-number of words-}
| UNPACK !Word16 -- unpack N words from t.o.s Constr
| PACK DataCon !Word16
- -- after assembly, the DataCon is an index into the
- -- itbl array
+ -- after assembly, the DataCon is an index into the
+ -- itbl array
-- For doing case trees
| LABEL LocalLabel
| TESTLT_I Int LocalLabel
@@ -146,13 +139,13 @@ data BCInstr
-- To Infinity And Beyond
| ENTER
- | RETURN -- return a lifted value
+ | RETURN -- return a lifted value
| RETURN_UBX ArgRep -- return an unlifted value, here's its rep
- -- Breakpoints
+ -- Breakpoints
| BRK_FUN (MutableByteArray# RealWorld) Word16 BreakInfo
-data BreakInfo
+data BreakInfo
= BreakInfo
{ breakInfo_module :: Module
, breakInfo_number :: {-# UNPACK #-} !Int
@@ -172,8 +165,8 @@ instance Outputable BreakInfo where
instance Outputable a => Outputable (ProtoBCO a) where
ppr (ProtoBCO name instrs bitmap bsize arity origin malloced)
- = (text "ProtoBCO" <+> ppr name <> char '#' <> int arity
- <+> text (show malloced) <> colon)
+ = (text "ProtoBCO" <+> ppr name <> char '#' <> int arity
+ <+> text (show malloced) <> colon)
$$ nest 3 (case origin of
Left alts -> vcat (zipWith (<+>) (char '{' : repeat (char ';'))
(map (pprCoreAltShort.deAnnAlt) alts)) <+> char '}'
@@ -211,8 +204,8 @@ instance Outputable BCInstr where
ppr (PUSH_L offset) = text "PUSH_L " <+> ppr offset
ppr (PUSH_LL o1 o2) = text "PUSH_LL " <+> ppr o1 <+> ppr o2
ppr (PUSH_LLL o1 o2 o3) = text "PUSH_LLL" <+> ppr o1 <+> ppr o2 <+> ppr o3
- ppr (PUSH_G nm) = text "PUSH_G " <+> ppr nm
- ppr (PUSH_PRIMOP op) = text "PUSH_G " <+> text "GHC.PrimopWrappers."
+ ppr (PUSH_G nm) = text "PUSH_G " <+> ppr nm
+ ppr (PUSH_PRIMOP op) = text "PUSH_G " <+> text "GHC.PrimopWrappers."
<> ppr op
ppr (PUSH_BCO bco) = hang (text "PUSH_BCO") 2 (ppr bco)
ppr (PUSH_ALTS bco) = hang (text "PUSH_ALTS") 2 (ppr bco)
@@ -220,23 +213,23 @@ instance Outputable BCInstr where
ppr (PUSH_UBX (Left lit) nw) = text "PUSH_UBX" <+> parens (ppr nw) <+> ppr lit
ppr (PUSH_UBX (Right aa) nw) = text "PUSH_UBX" <+> parens (ppr nw) <+> text (show aa)
- ppr PUSH_APPLY_N = text "PUSH_APPLY_N"
- ppr PUSH_APPLY_V = text "PUSH_APPLY_V"
- ppr PUSH_APPLY_F = text "PUSH_APPLY_F"
- ppr PUSH_APPLY_D = text "PUSH_APPLY_D"
- ppr PUSH_APPLY_L = text "PUSH_APPLY_L"
- ppr PUSH_APPLY_P = text "PUSH_APPLY_P"
- ppr PUSH_APPLY_PP = text "PUSH_APPLY_PP"
- ppr PUSH_APPLY_PPP = text "PUSH_APPLY_PPP"
- ppr PUSH_APPLY_PPPP = text "PUSH_APPLY_PPPP"
- ppr PUSH_APPLY_PPPPP = text "PUSH_APPLY_PPPPP"
- ppr PUSH_APPLY_PPPPPP = text "PUSH_APPLY_PPPPPP"
+ ppr PUSH_APPLY_N = text "PUSH_APPLY_N"
+ ppr PUSH_APPLY_V = text "PUSH_APPLY_V"
+ ppr PUSH_APPLY_F = text "PUSH_APPLY_F"
+ ppr PUSH_APPLY_D = text "PUSH_APPLY_D"
+ ppr PUSH_APPLY_L = text "PUSH_APPLY_L"
+ ppr PUSH_APPLY_P = text "PUSH_APPLY_P"
+ ppr PUSH_APPLY_PP = text "PUSH_APPLY_PP"
+ ppr PUSH_APPLY_PPP = text "PUSH_APPLY_PPP"
+ ppr PUSH_APPLY_PPPP = text "PUSH_APPLY_PPPP"
+ ppr PUSH_APPLY_PPPPP = text "PUSH_APPLY_PPPPP"
+ ppr PUSH_APPLY_PPPPPP = text "PUSH_APPLY_PPPPPP"
ppr (SLIDE n d) = text "SLIDE " <+> ppr n <+> ppr d
ppr (ALLOC_AP sz) = text "ALLOC_AP " <+> ppr sz
ppr (ALLOC_AP_NOUPD sz) = text "ALLOC_AP_NOUPD " <+> ppr sz
ppr (ALLOC_PAP arity sz) = text "ALLOC_PAP " <+> ppr arity <+> ppr sz
- ppr (MKAP offset sz) = text "MKAP " <+> ppr sz <+> text "words,"
+ ppr (MKAP offset sz) = text "MKAP " <+> ppr sz <+> text "words,"
<+> ppr offset <+> text "stkoff"
ppr (MKPAP offset sz) = text "MKPAP " <+> ppr sz <+> text "words,"
<+> ppr offset <+> text "stkoff"
@@ -255,8 +248,8 @@ instance Outputable BCInstr where
ppr (TESTEQ_P i lab) = text "TESTEQ_P" <+> ppr i <+> text "__" <> ppr lab
ppr CASEFAIL = text "CASEFAIL"
ppr (JMP lab) = text "JMP" <+> ppr lab
- ppr (CCALL off marshall_addr int) = text "CCALL " <+> ppr off
- <+> text "marshall code at"
+ ppr (CCALL off marshall_addr int) = text "CCALL " <+> ppr off
+ <+> text "marshall code at"
<+> text (show marshall_addr)
<+> (if int == 1
then text "(interruptible)"
@@ -264,7 +257,7 @@ instance Outputable BCInstr where
ppr (SWIZZLE stkoff n) = text "SWIZZLE " <+> text "stkoff" <+> ppr stkoff
<+> text "by" <+> ppr n
ppr ENTER = text "ENTER"
- ppr RETURN = text "RETURN"
+ ppr RETURN = text "RETURN"
ppr (RETURN_UBX pk) = text "RETURN_UBX " <+> ppr pk
ppr (BRK_FUN _breakArray index info) = text "BRK_FUN" <+> text "" <+> ppr index <+> ppr info
@@ -283,54 +276,54 @@ protoBCOStackUse bco = sum (map bciStackUse (protoBCOInstrs bco))
bciStackUse :: BCInstr -> Word
bciStackUse STKCHECK{} = 0
-bciStackUse PUSH_L{} = 1
-bciStackUse PUSH_LL{} = 2
+bciStackUse PUSH_L{} = 1
+bciStackUse PUSH_LL{} = 2
bciStackUse PUSH_LLL{} = 3
-bciStackUse PUSH_G{} = 1
+bciStackUse PUSH_G{} = 1
bciStackUse PUSH_PRIMOP{} = 1
-bciStackUse PUSH_BCO{} = 1
+bciStackUse PUSH_BCO{} = 1
bciStackUse (PUSH_ALTS bco) = 2 + protoBCOStackUse bco
bciStackUse (PUSH_ALTS_UNLIFTED bco _) = 2 + protoBCOStackUse bco
bciStackUse (PUSH_UBX _ nw) = fromIntegral nw
-bciStackUse PUSH_APPLY_N{} = 1
-bciStackUse PUSH_APPLY_V{} = 1
-bciStackUse PUSH_APPLY_F{} = 1
-bciStackUse PUSH_APPLY_D{} = 1
-bciStackUse PUSH_APPLY_L{} = 1
-bciStackUse PUSH_APPLY_P{} = 1
-bciStackUse PUSH_APPLY_PP{} = 1
-bciStackUse PUSH_APPLY_PPP{} = 1
-bciStackUse PUSH_APPLY_PPPP{} = 1
-bciStackUse PUSH_APPLY_PPPPP{} = 1
-bciStackUse PUSH_APPLY_PPPPPP{} = 1
+bciStackUse PUSH_APPLY_N{} = 1
+bciStackUse PUSH_APPLY_V{} = 1
+bciStackUse PUSH_APPLY_F{} = 1
+bciStackUse PUSH_APPLY_D{} = 1
+bciStackUse PUSH_APPLY_L{} = 1
+bciStackUse PUSH_APPLY_P{} = 1
+bciStackUse PUSH_APPLY_PP{} = 1
+bciStackUse PUSH_APPLY_PPP{} = 1
+bciStackUse PUSH_APPLY_PPPP{} = 1
+bciStackUse PUSH_APPLY_PPPPP{} = 1
+bciStackUse PUSH_APPLY_PPPPPP{} = 1
bciStackUse ALLOC_AP{} = 1
bciStackUse ALLOC_AP_NOUPD{} = 1
bciStackUse ALLOC_PAP{} = 1
bciStackUse (UNPACK sz) = fromIntegral sz
-bciStackUse LABEL{} = 0
-bciStackUse TESTLT_I{} = 0
-bciStackUse TESTEQ_I{} = 0
-bciStackUse TESTLT_W{} = 0
-bciStackUse TESTEQ_W{} = 0
-bciStackUse TESTLT_F{} = 0
-bciStackUse TESTEQ_F{} = 0
-bciStackUse TESTLT_D{} = 0
-bciStackUse TESTEQ_D{} = 0
-bciStackUse TESTLT_P{} = 0
-bciStackUse TESTEQ_P{} = 0
-bciStackUse CASEFAIL{} = 0
-bciStackUse JMP{} = 0
-bciStackUse ENTER{} = 0
-bciStackUse RETURN{} = 0
-bciStackUse RETURN_UBX{} = 1
-bciStackUse CCALL{} = 0
-bciStackUse SWIZZLE{} = 0
-bciStackUse BRK_FUN{} = 0
+bciStackUse LABEL{} = 0
+bciStackUse TESTLT_I{} = 0
+bciStackUse TESTEQ_I{} = 0
+bciStackUse TESTLT_W{} = 0
+bciStackUse TESTEQ_W{} = 0
+bciStackUse TESTLT_F{} = 0
+bciStackUse TESTEQ_F{} = 0
+bciStackUse TESTLT_D{} = 0
+bciStackUse TESTEQ_D{} = 0
+bciStackUse TESTLT_P{} = 0
+bciStackUse TESTEQ_P{} = 0
+bciStackUse CASEFAIL{} = 0
+bciStackUse JMP{} = 0
+bciStackUse ENTER{} = 0
+bciStackUse RETURN{} = 0
+bciStackUse RETURN_UBX{} = 1
+bciStackUse CCALL{} = 0
+bciStackUse SWIZZLE{} = 0
+bciStackUse BRK_FUN{} = 0
-- These insns actually reduce stack use, but we need the high-tide level,
-- so can't use this info. Not that it matters much.
-bciStackUse SLIDE{} = 0
-bciStackUse MKAP{} = 0
-bciStackUse MKPAP{} = 0
-bciStackUse PACK{} = 1 -- worst case is PACK 0 words
+bciStackUse SLIDE{} = 0
+bciStackUse MKAP{} = 0
+bciStackUse MKPAP{} = 0
+bciStackUse PACK{} = 1 -- worst case is PACK 0 words
\end{code}
diff --git a/compiler/ghci/ByteCodeItbls.lhs b/compiler/ghci/ByteCodeItbls.lhs
index ce6bd01f16e3..7a7a62d98025 100644
--- a/compiler/ghci/ByteCodeItbls.lhs
+++ b/compiler/ghci/ByteCodeItbls.lhs
@@ -4,7 +4,8 @@
ByteCodeItbls: Generate infotables for interpreter-made bytecodes
\begin{code}
-{-# OPTIONS -optc-DNON_POSIX_SOURCE #-}
+{-# LANGUAGE CPP, MagicHash #-}
+{-# OPTIONS_GHC -optc-DNON_POSIX_SOURCE #-}
module ByteCodeItbls ( ItblEnv, ItblPtr(..), itblCode, mkITbls, peekItbl
, StgInfoTable(..)
diff --git a/compiler/ghci/ByteCodeLink.lhs b/compiler/ghci/ByteCodeLink.lhs
index 4c484097f0f9..cbedb717fe76 100644
--- a/compiler/ghci/ByteCodeLink.lhs
+++ b/compiler/ghci/ByteCodeLink.lhs
@@ -5,7 +5,12 @@ ByteCodeLink: Bytecode assembler and linker
\begin{code}
{-# LANGUAGE BangPatterns #-}
-{-# OPTIONS -optc-DNON_POSIX_SOURCE #-}
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE MagicHash #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE UnboxedTuples #-}
+{-# OPTIONS_GHC -optc-DNON_POSIX_SOURCE #-}
module ByteCodeLink (
ClosureEnv, emptyClosureEnv, extendClosureEnv,
@@ -255,13 +260,13 @@ linkFail who what
-- HACKS!!! ToDo: cleaner
nameToCLabel :: Name -> String{-suffix-} -> String
nameToCLabel n suffix
- = if pkgid /= mainPackageId
+ = if pkgid /= mainPackageKey
then package_part ++ '_': qual_name
else qual_name
where
- pkgid = modulePackageId mod
+ pkgid = modulePackageKey mod
mod = ASSERT( isExternalName n ) nameModule n
- package_part = zString (zEncodeFS (packageIdFS (modulePackageId mod)))
+ package_part = zString (zEncodeFS (packageKeyFS (modulePackageKey mod)))
module_part = zString (zEncodeFS (moduleNameFS (moduleName mod)))
occ_part = zString (zEncodeFS (occNameFS (nameOccName n)))
qual_name = module_part ++ '_':occ_part ++ '_':suffix
diff --git a/compiler/ghci/Debugger.hs b/compiler/ghci/Debugger.hs
index 0807bf17b51c..49667141819b 100644
--- a/compiler/ghci/Debugger.hs
+++ b/compiler/ghci/Debugger.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE MagicHash #-}
+
-----------------------------------------------------------------------------
--
-- GHCi Interactive debugging commands
diff --git a/compiler/ghci/DebuggerUtils.hs b/compiler/ghci/DebuggerUtils.hs
index 3d73e69e2bfe..9ccb11331497 100644
--- a/compiler/ghci/DebuggerUtils.hs
+++ b/compiler/ghci/DebuggerUtils.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
module DebuggerUtils (
dataConInfoPtrToName,
) where
@@ -44,7 +46,7 @@ dataConInfoPtrToName x = do
modFS = mkFastStringByteList mod
occFS = mkFastStringByteList occ
occName = mkOccNameFS OccName.dataName occFS
- modName = mkModule (fsToPackageId pkgFS) (mkModuleNameFS modFS)
+ modName = mkModule (fsToPackageKey pkgFS) (mkModuleNameFS modFS)
return (Left $ showSDoc dflags $ ppr modName <> dot <> ppr occName)
`recoverM` (Right `fmap` lookupOrig modName occName)
diff --git a/compiler/ghci/Linker.lhs b/compiler/ghci/Linker.lhs
index eb3e226ab4c6..40b83bbbaef4 100644
--- a/compiler/ghci/Linker.lhs
+++ b/compiler/ghci/Linker.lhs
@@ -2,15 +2,16 @@
% (c) The University of Glasgow 2005-2012
%
\begin{code}
+{-# LANGUAGE CPP, NondecreasingIndentation #-}
+{-# OPTIONS_GHC -fno-cse #-}
+-- -fno-cse is needed for GLOBAL_VAR's to behave properly
+
-- | The dynamic linker for GHCi.
--
-- This module deals with the top-level issues of dynamic linking,
-- calling the object-code linker and the byte-code linker where
-- necessary.
-{-# OPTIONS -fno-cse #-}
--- -fno-cse is needed for GLOBAL_VAR's to behave properly
-
module Linker ( getHValue, showLinkerState,
linkExpr, linkDecls, unload, withExtendedLinkEnv,
extendLinkEnv, deleteFromLinkEnv,
@@ -58,7 +59,6 @@ import Control.Monad
import Data.IORef
import Data.List
-import qualified Data.Map as Map
import Control.Concurrent.MVar
import System.FilePath
@@ -69,7 +69,7 @@ import System.Directory hiding (findFile)
import System.Directory
#endif
-import Distribution.Package hiding (depends, PackageId)
+import Distribution.Package hiding (depends, mkPackageKey, PackageKey)
import Exception
\end{code}
@@ -123,7 +123,7 @@ data PersistentLinkerState
-- The currently-loaded packages; always object code
-- Held, as usual, in dependency order; though I am not sure if
-- that is really important
- pkgs_loaded :: ![PackageId]
+ pkgs_loaded :: ![PackageKey]
}
emptyPLS :: DynFlags -> PersistentLinkerState
@@ -139,10 +139,10 @@ emptyPLS _ = PersistentLinkerState {
--
-- The linker's symbol table is populated with RTS symbols using an
-- explicit list. See rts/Linker.c for details.
- where init_pkgs = [rtsPackageId]
+ where init_pkgs = [rtsPackageKey]
-extendLoadedPkgs :: [PackageId] -> IO ()
+extendLoadedPkgs :: [PackageKey] -> IO ()
extendLoadedPkgs pkgs =
modifyPLS_ $ \s ->
return s{ pkgs_loaded = pkgs ++ pkgs_loaded s }
@@ -378,7 +378,16 @@ preloadLib dflags lib_paths framework_paths lib_spec
-> do maybe_errstr <- loadDLL (mkSOName platform dll_unadorned)
case maybe_errstr of
Nothing -> maybePutStrLn dflags "done"
- Just mm -> preloadFailed mm lib_paths lib_spec
+ Just mm | platformOS platform /= OSDarwin ->
+ preloadFailed mm lib_paths lib_spec
+ Just mm | otherwise -> do
+ -- As a backup, on Darwin, try to also load a .so file
+ -- since (apparently) some things install that way - see
+ -- ticket #8770.
+ err2 <- loadDLL $ ("lib" ++ dll_unadorned) <.> "so"
+ case err2 of
+ Nothing -> maybePutStrLn dflags "done"
+ Just _ -> preloadFailed mm lib_paths lib_spec
DLLPath dll_path
-> do maybe_errstr <- loadDLL dll_path
@@ -517,7 +526,7 @@ getLinkDeps :: HscEnv -> HomePackageTable
-> Maybe FilePath -- replace object suffices?
-> SrcSpan -- for error messages
-> [Module] -- If you need these
- -> IO ([Linkable], [PackageId]) -- ... then link these first
+ -> IO ([Linkable], [PackageKey]) -- ... then link these first
-- Fails with an IO exception if it can't find enough files
getLinkDeps hsc_env hpt pls replace_osuf span mods
@@ -555,8 +564,8 @@ getLinkDeps hsc_env hpt pls replace_osuf span mods
-- tree recursively. See bug #936, testcase ghci/prog007.
follow_deps :: [Module] -- modules to follow
-> UniqSet ModuleName -- accum. module dependencies
- -> UniqSet PackageId -- accum. package dependencies
- -> IO ([ModuleName], [PackageId]) -- result
+ -> UniqSet PackageKey -- accum. package dependencies
+ -> IO ([ModuleName], [PackageKey]) -- result
follow_deps [] acc_mods acc_pkgs
= return (uniqSetToList acc_mods, uniqSetToList acc_pkgs)
follow_deps (mod:mods) acc_mods acc_pkgs
@@ -570,7 +579,7 @@ getLinkDeps hsc_env hpt pls replace_osuf span mods
when (mi_boot iface) $ link_boot_mod_error mod
let
- pkg = modulePackageId mod
+ pkg = modulePackageKey mod
deps = mi_deps iface
pkg_deps = dep_pkgs deps
@@ -1035,7 +1044,7 @@ showLS (Framework nm) = "(framework) " ++ nm
-- automatically, and it doesn't matter what order you specify the input
-- packages.
--
-linkPackages :: DynFlags -> [PackageId] -> IO ()
+linkPackages :: DynFlags -> [PackageKey] -> IO ()
-- NOTE: in fact, since each module tracks all the packages it depends on,
-- we don't really need to use the package-config dependencies.
--
@@ -1051,16 +1060,13 @@ linkPackages dflags new_pkgs = do
modifyPLS_ $ \pls -> do
linkPackages' dflags new_pkgs pls
-linkPackages' :: DynFlags -> [PackageId] -> PersistentLinkerState
+linkPackages' :: DynFlags -> [PackageKey] -> PersistentLinkerState
-> IO PersistentLinkerState
linkPackages' dflags new_pks pls = do
pkgs' <- link (pkgs_loaded pls) new_pks
return $! pls { pkgs_loaded = pkgs' }
where
- pkg_map = pkgIdMap (pkgState dflags)
- ipid_map = installedPackageIdMap (pkgState dflags)
-
- link :: [PackageId] -> [PackageId] -> IO [PackageId]
+ link :: [PackageKey] -> [PackageKey] -> IO [PackageKey]
link pkgs new_pkgs =
foldM link_one pkgs new_pkgs
@@ -1068,17 +1074,16 @@ linkPackages' dflags new_pks pls = do
| new_pkg `elem` pkgs -- Already linked
= return pkgs
- | Just pkg_cfg <- lookupPackage pkg_map new_pkg
+ | Just pkg_cfg <- lookupPackage dflags new_pkg
= do { -- Link dependents first
- pkgs' <- link pkgs [ Maybes.expectJust "link_one" $
- Map.lookup ipid ipid_map
+ pkgs' <- link pkgs [ resolveInstalledPackageId dflags ipid
| ipid <- depends pkg_cfg ]
-- Now link the package itself
; linkPackage dflags pkg_cfg
; return (new_pkg : pkgs') }
| otherwise
- = throwGhcExceptionIO (CmdLineError ("unknown package: " ++ packageIdString new_pkg))
+ = throwGhcExceptionIO (CmdLineError ("unknown package: " ++ packageKeyString new_pkg))
linkPackage :: DynFlags -> PackageConfig -> IO ()
@@ -1199,7 +1204,9 @@ locateLib dflags is_hs dirs lib
mk_hs_dyn_lib_path dir = dir > mkHsSOName platform hs_dyn_lib_name
so_name = mkSOName platform lib
- mk_dyn_lib_path dir = dir > so_name
+ mk_dyn_lib_path dir = case (arch, os) of
+ (ArchX86_64, OSSolaris2) -> dir > ("64/" ++ so_name)
+ _ -> dir > so_name
findObject = liftM (fmap Object) $ findFile mk_obj_path dirs
findDynObject = liftM (fmap Object) $ findFile mk_dyn_obj_path dirs
@@ -1216,6 +1223,8 @@ locateLib dflags is_hs dirs lib
Nothing -> g
platform = targetPlatform dflags
+ arch = platformArch platform
+ os = platformOS platform
searchForLibUsingGcc :: DynFlags -> String -> [FilePath] -> IO (Maybe FilePath)
searchForLibUsingGcc dflags so dirs = do
diff --git a/compiler/ghci/RtClosureInspect.hs b/compiler/ghci/RtClosureInspect.hs
index 76b845114ac1..dde813d31db6 100644
--- a/compiler/ghci/RtClosureInspect.hs
+++ b/compiler/ghci/RtClosureInspect.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, ScopedTypeVariables, MagicHash, UnboxedTuples #-}
+
-----------------------------------------------------------------------------
--
-- GHC Interactive support for inspecting arbitrary closures at runtime
@@ -5,14 +7,6 @@
-- Pepe Iborra (supported by Google SoC) 2006
--
-----------------------------------------------------------------------------
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module RtClosureInspect(
cvObtainTerm, -- :: HscEnv -> Int -> Bool -> Maybe Type -> HValue -> IO Term
cvReconstructType,
@@ -83,9 +77,9 @@ import System.IO.Unsafe
data Term = Term { ty :: RttiType
, dc :: Either String DataCon
-- Carries a text representation if the datacon is
- -- not exported by the .hi file, which is the case
+ -- not exported by the .hi file, which is the case
-- for private constructors in -O0 compiled libraries
- , val :: HValue
+ , val :: HValue
, subTerms :: [Term] }
| Prim { ty :: RttiType
@@ -140,20 +134,20 @@ instance Outputable (Term) where
-------------------------------------------------------------------------
-- Runtime Closure Datatype and functions for retrieving closure related stuff
-------------------------------------------------------------------------
-data ClosureType = Constr
- | Fun
- | Thunk Int
+data ClosureType = Constr
+ | Fun
+ | Thunk Int
| ThunkSelector
- | Blackhole
- | AP
- | PAP
- | Indirection Int
+ | Blackhole
+ | AP
+ | PAP
+ | Indirection Int
| MutVar Int
| MVar Int
| Other Int
deriving (Show, Eq)
-data Closure = Closure { tipe :: ClosureType
+data Closure = Closure { tipe :: ClosureType
, infoPtr :: Ptr ()
, infoTable :: StgInfoTable
, ptrs :: Array Int HValue
@@ -161,7 +155,7 @@ data Closure = Closure { tipe :: ClosureType
}
instance Outputable ClosureType where
- ppr = text . show
+ ppr = text . show
#include "../includes/rts/storage/ClosureTypes.h"
@@ -173,7 +167,7 @@ pAP_CODE = PAP
getClosureData :: DynFlags -> a -> IO Closure
getClosureData dflags a =
- case unpackClosure# a of
+ case unpackClosure# a of
(# iptr, ptrs, nptrs #) -> do
let iptr'
| ghciTablesNextToCode =
@@ -192,11 +186,11 @@ getClosureData dflags a =
nptrs_data = [W# (indexWordArray# nptrs i)
| I# i <- [0.. fromIntegral (BCI.nptrs itbl)-1] ]
ASSERT(elems >= 0) return ()
- ptrsList `seq`
+ ptrsList `seq`
return (Closure tipe (Ptr iptr) itbl ptrsList nptrs_data)
readCType :: Integral a => a -> ClosureType
-readCType i
+readCType i
| i >= CONSTR && i <= CONSTR_NOCAF_STATIC = Constr
| i >= FUN && i <= FUN_STATIC = Fun
| i >= THUNK && i < THUNK_SELECTOR = Thunk i'
@@ -210,7 +204,7 @@ readCType i
| i == MVAR_CLEAN || i == MVAR_DIRTY = MVar i'
| otherwise = Other i'
where i' = fromIntegral i
-
+
isConstr, isIndirection, isThunk :: ClosureType -> Bool
isConstr Constr = True
isConstr _ = False
@@ -238,7 +232,7 @@ unsafeDeepSeq :: a -> b -> b
unsafeDeepSeq = unsafeDeepSeq1 2
where unsafeDeepSeq1 0 a b = seq a $! b
unsafeDeepSeq1 i a b -- 1st case avoids infinite loops for non reducible thunks
- | not (isConstr tipe) = seq a $! unsafeDeepSeq1 (i-1) a b
+ | not (isConstr tipe) = seq a $! unsafeDeepSeq1 (i-1) a b
-- | unsafePerformIO (isFullyEvaluated a) = b
| otherwise = case unsafePerformIO (getClosureData a) of
closure -> foldl' (flip unsafeDeepSeq) b (ptrs closure)
@@ -313,7 +307,7 @@ mapTermTypeM f = foldTermM TermFoldM {
termTyVars :: Term -> TyVarSet
termTyVars = foldTerm TermFold {
- fTerm = \ty _ _ tt ->
+ fTerm = \ty _ _ tt ->
tyVarsOfType ty `plusVarEnv` concatVarEnv tt,
fSuspension = \_ ty _ _ -> tyVarsOfType ty,
fPrim = \ _ _ -> emptyVarEnv,
@@ -345,21 +339,21 @@ ppr_termM y p Term{dc=Left dc_tag, subTerms=tt} = do
tt_docs <- mapM (y app_prec) tt
return $ cparen (not (null tt) && p >= app_prec)
(text dc_tag <+> pprDeeperList fsep tt_docs)
-
-ppr_termM y p Term{dc=Right dc, subTerms=tt}
+
+ppr_termM y p Term{dc=Right dc, subTerms=tt}
{- | dataConIsInfix dc, (t1:t2:tt') <- tt --TODO fixity
- = parens (ppr_term1 True t1 <+> ppr dc <+> ppr_term1 True ppr t2)
- <+> hsep (map (ppr_term1 True) tt)
+ = parens (ppr_term1 True t1 <+> ppr dc <+> ppr_term1 True ppr t2)
+ <+> hsep (map (ppr_term1 True) tt)
-} -- TODO Printing infix constructors properly
| null sub_terms_to_show
= return (ppr dc)
- | otherwise
+ | otherwise
= do { tt_docs <- mapM (y app_prec) sub_terms_to_show
; return $ cparen (p >= app_prec) $
sep [ppr dc, nest 2 (pprDeeperList fsep tt_docs)] }
where
- sub_terms_to_show -- Don't show the dictionary arguments to
- -- constructors unless -dppr-debug is on
+ sub_terms_to_show -- Don't show the dictionary arguments to
+ -- constructors unless -dppr-debug is on
| opt_PprStyle_Debug = tt
| otherwise = dropList (dataConTheta dc) tt
@@ -376,9 +370,9 @@ ppr_termM _ _ t = ppr_termM1 t
ppr_termM1 :: Monad m => Term -> m SDoc
-ppr_termM1 Prim{value=words, ty=ty} =
+ppr_termM1 Prim{value=words, ty=ty} =
return $ repPrim (tyConAppTyCon ty) words
-ppr_termM1 Suspension{ty=ty, bound_to=Nothing} =
+ppr_termM1 Suspension{ty=ty, bound_to=Nothing} =
return (char '_' <+> ifPprDebug (text "::" <> ppr ty))
ppr_termM1 Suspension{ty=ty, bound_to=Just n}
-- | Just _ <- splitFunTy_maybe ty = return$ ptext (sLit("")
@@ -390,7 +384,7 @@ ppr_termM1 NewtypeWrap{} = panic "ppr_termM1 - NewtypeWrap"
pprNewtypeWrap y p NewtypeWrap{ty=ty, wrapped_term=t}
| Just (tc,_) <- tcSplitTyConApp_maybe ty
, ASSERT(isNewTyCon tc) True
- , Just new_dc <- tyConSingleDataCon_maybe tc = do
+ , Just new_dc <- tyConSingleDataCon_maybe tc = do
real_term <- y max_prec t
return $ cparen (p >= app_prec) (ppr new_dc <+> real_term)
pprNewtypeWrap _ _ _ = panic "pprNewtypeWrap"
@@ -399,11 +393,11 @@ pprNewtypeWrap _ _ _ = panic "pprNewtypeWrap"
-- Custom Term Pretty Printers
-------------------------------------------------------
--- We can want to customize the representation of a
--- term depending on its type.
+-- We can want to customize the representation of a
+-- term depending on its type.
-- However, note that custom printers have to work with
-- type representations, instead of directly with types.
--- We cannot use type classes here, unless we employ some
+-- We cannot use type classes here, unless we employ some
-- typerep trickery (e.g. Weirich's RepLib tricks),
-- which I didn't. Therefore, this code replicates a lot
-- of what type classes provide for free.
@@ -411,7 +405,7 @@ pprNewtypeWrap _ _ _ = panic "pprNewtypeWrap"
type CustomTermPrinter m = TermPrinterM m
-> [Precedence -> Term -> (m (Maybe SDoc))]
--- | Takes a list of custom printers with a explicit recursion knot and a term,
+-- | Takes a list of custom printers with a explicit recursion knot and a term,
-- and returns the output of the first successful printer, or the default printer
cPprTerm :: Monad m => CustomTermPrinter m -> Term -> m SDoc
cPprTerm printers_ = go 0 where
@@ -428,7 +422,7 @@ cPprTerm printers_ = go 0 where
-- Default set of custom printers. Note that the recursion knot is explicit
cPprTermBase :: forall m. Monad m => CustomTermPrinter m
cPprTermBase y =
- [ ifTerm (isTupleTy.ty) (\_p -> liftM (parens . hcat . punctuate comma)
+ [ ifTerm (isTupleTy.ty) (\_p -> liftM (parens . hcat . punctuate comma)
. mapM (y (-1))
. subTerms)
, ifTerm (\t -> isTyCon listTyCon (ty t) && subTerms t `lengthIs` 2)
@@ -439,7 +433,7 @@ cPprTermBase y =
, ifTerm (isTyCon doubleTyCon . ty) ppr_double
, ifTerm (isIntegerTy . ty) ppr_integer
]
- where
+ where
ifTerm :: (Term -> Bool)
-> (Precedence -> Term -> m SDoc)
-> Precedence -> Term -> m (Maybe SDoc)
@@ -447,11 +441,11 @@ cPprTermBase y =
| pred t = Just `liftM` f prec t
ifTerm _ _ _ _ = return Nothing
- isTupleTy ty = fromMaybe False $ do
- (tc,_) <- tcSplitTyConApp_maybe ty
+ isTupleTy ty = fromMaybe False $ do
+ (tc,_) <- tcSplitTyConApp_maybe ty
return (isBoxedTupleTyCon tc)
- isTyCon a_tc ty = fromMaybe False $ do
+ isTyCon a_tc ty = fromMaybe False $ do
(tc,_) <- tcSplitTyConApp_maybe ty
return (a_tc == tc)
@@ -459,7 +453,7 @@ cPprTermBase y =
(tc,_) <- tcSplitTyConApp_maybe ty
return (tyConName tc == integerTyConName)
- ppr_int, ppr_char, ppr_float, ppr_double, ppr_integer
+ ppr_int, ppr_char, ppr_float, ppr_double, ppr_integer
:: Precedence -> Term -> m SDoc
ppr_int _ v = return (Ppr.int (unsafeCoerce# (val v)))
ppr_char _ v = return (Ppr.char '\'' <> Ppr.char (unsafeCoerce# (val v)) <> Ppr.char '\'')
@@ -472,16 +466,16 @@ cPprTermBase y =
ppr_list p (Term{subTerms=[h,t]}) = do
let elems = h : getListTerms t
isConsLast = not(termType(last elems) `eqType` termType h)
- is_string = all (isCharTy . ty) elems
+ is_string = all (isCharTy . ty) elems
print_elems <- mapM (y cons_prec) elems
if is_string
then return (Ppr.doubleQuotes (Ppr.text (unsafeCoerce# (map val elems))))
else if isConsLast
- then return $ cparen (p >= cons_prec)
- $ pprDeeperList fsep
+ then return $ cparen (p >= cons_prec)
+ $ pprDeeperList fsep
$ punctuate (space<>colon) print_elems
- else return $ brackets
+ else return $ brackets
$ pprDeeperList fcat
$ punctuate comma print_elems
@@ -513,16 +507,18 @@ repPrim t = rep where
| t == threadIdPrimTyCon = text ""
| t == weakPrimTyCon = text ""
| t == arrayPrimTyCon = text ""
+ | t == smallArrayPrimTyCon = text ""
| t == byteArrayPrimTyCon = text ""
| t == mutableArrayPrimTyCon = text ""
+ | t == smallMutableArrayPrimTyCon = text ""
| t == mutableByteArrayPrimTyCon = text ""
| t == mutVarPrimTyCon = text ""
| t == mVarPrimTyCon = text ""
| t == tVarPrimTyCon = text ""
| otherwise = char '<' <> ppr t <> char '>'
- where build ww = unsafePerformIO $ withArray ww (peek . castPtr)
--- This ^^^ relies on the representation of Haskell heap values being
--- the same as in a C array.
+ where build ww = unsafePerformIO $ withArray ww (peek . castPtr)
+-- This ^^^ relies on the representation of Haskell heap values being
+-- the same as in a C array.
-----------------------------------
-- Type Reconstruction
@@ -533,14 +529,14 @@ The algorithm walks the heap generating a set of equations, which
are solved with syntactic unification.
A type reconstruction equation looks like:
- =
+ =
The full equation set is generated by traversing all the subterms, starting
from a given term.
The only difficult part is that newtypes are only found in the lhs of equations.
-Right hand sides are missing them. We can either (a) drop them from the lhs, or
-(b) reconstruct them in the rhs when possible.
+Right hand sides are missing them. We can either (a) drop them from the lhs, or
+(b) reconstruct them in the rhs when possible.
The function congruenceNewtypes takes a shot at (b)
-}
@@ -570,7 +566,7 @@ runTR hsc_env thing = do
runTR_maybe :: HscEnv -> TR a -> IO (Maybe a)
runTR_maybe hsc_env thing_inside
- = do { (_errs, res) <- initTc hsc_env HsSrcFile False
+ = do { (_errs, res) <- initTc hsc_env HsSrcFile False
(icInteractiveModule (hsc_IC hsc_env))
thing_inside
; return res }
@@ -579,17 +575,17 @@ traceTR :: SDoc -> TR ()
traceTR = liftTcM . traceOptTcRn Opt_D_dump_rtti
--- Semantically different to recoverM in TcRnMonad
+-- Semantically different to recoverM in TcRnMonad
-- recoverM retains the errors in the first action,
-- whereas recoverTc here does not
recoverTR :: TR a -> TR a -> TR a
-recoverTR recover thing = do
+recoverTR recover thing = do
(_,mb_res) <- tryTcErrs thing
- case mb_res of
+ case mb_res of
Nothing -> recover
Just res -> return res
-trIO :: IO a -> TR a
+trIO :: IO a -> TR a
trIO = liftTcM . liftIO
liftTcM :: TcM a -> TR a
@@ -604,17 +600,17 @@ instTyVars :: [TyVar] -> TR ([TcTyVar], [TcType], TvSubst)
instTyVars = liftTcM . tcInstTyVars
type RttiInstantiation = [(TcTyVar, TyVar)]
- -- Associates the typechecker-world meta type variables
- -- (which are mutable and may be refined), to their
+ -- Associates the typechecker-world meta type variables
+ -- (which are mutable and may be refined), to their
-- debugger-world RuntimeUnk counterparts.
-- If the TcTyVar has not been refined by the runtime type
-- elaboration, then we want to turn it back into the
-- original RuntimeUnk
--- | Returns the instantiated type scheme ty', and the
+-- | Returns the instantiated type scheme ty', and the
-- mapping from new (instantiated) -to- old (skolem) type variables
instScheme :: QuantifiedType -> TR (TcType, RttiInstantiation)
-instScheme (tvs, ty)
+instScheme (tvs, ty)
= liftTcM $ do { (tvs', _, subst) <- tcInstTyVars tvs
; let rtti_inst = [(tv',tv) | (tv',tv) <- tvs' `zip` tvs]
; return (substTy subst ty, rtti_inst) }
@@ -694,7 +690,7 @@ cvObtainTerm hsc_env max_depth force old_ty hval = runTR hsc_env $ do
text "Term obtained: " <> ppr term $$
text "Type obtained: " <> ppr (termType term))
return term
- where
+ where
dflags = hsc_dflags hsc_env
go :: Int -> Type -> Type -> HValue -> TcM Term
@@ -711,7 +707,7 @@ cvObtainTerm hsc_env max_depth force old_ty hval = runTR hsc_env $ do
clos <- trIO $ getClosureData dflags a
return (Suspension (tipe clos) my_ty a Nothing)
go max_depth my_ty old_ty a = do
- let monomorphic = not(isTyVarTy my_ty)
+ let monomorphic = not(isTyVarTy my_ty)
-- This ^^^ is a convention. The ancestor tests for
-- monomorphism and passes a type instead of a tv
clos <- trIO $ getClosureData dflags a
@@ -731,14 +727,14 @@ cvObtainTerm hsc_env max_depth force old_ty hval = runTR hsc_env $ do
MutVar _ | Just (tycon,[world,contents_ty]) <- tcSplitTyConApp_maybe old_ty
-> do
-- Deal with the MutVar# primitive
- -- It does not have a constructor at all,
+ -- It does not have a constructor at all,
-- so we simulate the following one
-- MutVar# :: contents_ty -> MutVar# s contents_ty
traceTR (text "Following a MutVar")
contents_tv <- newVar liftedTypeKind
contents <- trIO$ IO$ \w -> readMutVar# (unsafeCoerce# a) w
ASSERT(isUnliftedTypeKind $ typeKind my_ty) return ()
- (mutvar_ty,_) <- instScheme $ quantifyType $ mkFunTy
+ (mutvar_ty,_) <- instScheme $ quantifyType $ mkFunTy
contents_ty (mkTyConApp tycon [world,contents_ty])
addConstraint (mkFunTy contents_tv my_ty) mutvar_ty
x <- go (pred max_depth) contents_tv contents_ty contents
@@ -758,12 +754,12 @@ cvObtainTerm hsc_env max_depth force old_ty hval = runTR hsc_env $ do
-- In such case, we return a best approximation:
-- ignore the unpointed args, and recover the pointeds
-- This preserves laziness, and should be safe.
- traceTR (text "Not constructor" <+> ppr dcname)
+ traceTR (text "Not constructor" <+> ppr dcname)
let dflags = hsc_dflags hsc_env
tag = showPpr dflags dcname
- vars <- replicateM (length$ elems$ ptrs clos)
+ vars <- replicateM (length$ elems$ ptrs clos)
(newVar liftedTypeKind)
- subTerms <- sequence [appArr (go (pred max_depth) tv tv) (ptrs clos) i
+ subTerms <- sequence [appArr (go (pred max_depth) tv tv) (ptrs clos) i
| (i, tv) <- zip [0..] vars]
return (Term my_ty (Left ('<' : tag ++ ">")) a subTerms)
Just dc -> do
@@ -871,7 +867,7 @@ cvReconstructType hsc_env max_depth old_ty hval = runTR_maybe hsc_env $ do
search _ _ _ 0 = traceTR (text "Failed to reconstruct a type after " <>
int max_depth <> text " steps")
search stop expand l d =
- case viewl l of
+ case viewl l of
EmptyL -> return ()
x :< xx -> unlessM stop $ do
new <- expand x
@@ -917,7 +913,7 @@ findPtrTys i ty
| Just (tc, elem_tys) <- tcSplitTyConApp_maybe ty
, isUnboxedTupleTyCon tc
= findPtrTyss i elem_tys
-
+
| otherwise
= case repType ty of
UnaryRep rep_ty | typePrimRep rep_ty == PtrRep -> return (i + 1, [(i, ty)])
@@ -939,8 +935,7 @@ findPtrTyss i tys = foldM step (i, []) tys
-- The types can contain skolem type variables, which need to be treated as normal vars.
-- In particular, we want them to unify with things.
improveRTTIType :: HscEnv -> RttiType -> RttiType -> Maybe TvSubst
-improveRTTIType _ base_ty new_ty
- = U.tcUnifyTys (const U.BindMe) [base_ty] [new_ty]
+improveRTTIType _ base_ty new_ty = U.tcUnifyTy base_ty new_ty
getDataConArgTys :: DataCon -> Type -> TR [Type]
-- Given the result type ty of a constructor application (D a b c :: ty)
@@ -951,7 +946,7 @@ getDataConArgTys :: DataCon -> Type -> TR [Type]
-- I believe that con_app_ty should not have any enclosing foralls
getDataConArgTys dc con_app_ty
= do { let UnaryRep rep_con_app_ty = repType con_app_ty
- ; traceTR (text "getDataConArgTys 1" <+> (ppr con_app_ty $$ ppr rep_con_app_ty
+ ; traceTR (text "getDataConArgTys 1" <+> (ppr con_app_ty $$ ppr rep_con_app_ty
$$ ppr (tcSplitTyConApp_maybe rep_con_app_ty)))
; (_, _, subst) <- instTyVars (univ_tvs ++ ex_tvs)
; addConstraint rep_con_app_ty (substTy subst (dataConOrigResTy dc))
@@ -972,7 +967,7 @@ Consider a GADT (cf Trac #7386)
...
In getDataConArgTys
-* con_app_ty is the known type (from outside) of the constructor application,
+* con_app_ty is the known type (from outside) of the constructor application,
say D [Int] Int
* The data constructor MkT has a (representation) dataConTyCon = DList,
@@ -981,7 +976,7 @@ In getDataConArgTys
MkT :: a -> DList a (Maybe a)
...
-So the dataConTyCon of the data constructor, DList, differs from
+So the dataConTyCon of the data constructor, DList, differs from
the "outside" type, D. So we can't straightforwardly decompose the
"outside" type, and we end up in the "_" branch of the case.
@@ -1123,9 +1118,9 @@ check2 (_, rtti_ty) (_, old_ty)
-- Dealing with newtypes
--------------------------
{-
- congruenceNewtypes does a parallel fold over two Type values,
- compensating for missing newtypes on both sides.
- This is necessary because newtypes are not present
+ congruenceNewtypes does a parallel fold over two Type values,
+ compensating for missing newtypes on both sides.
+ This is necessary because newtypes are not present
in runtime, but sometimes there is evidence available.
Evidence can come from DataCon signatures or
from compile-time type inference.
@@ -1171,8 +1166,8 @@ congruenceNewtypes lhs rhs = go lhs rhs >>= \rhs' -> return (lhs,rhs')
return (mkFunTy r1' r2')
-- TyconApp Inductive case; this is the interesting bit.
| Just (tycon_l, _) <- tcSplitTyConApp_maybe lhs
- , Just (tycon_r, _) <- tcSplitTyConApp_maybe rhs
- , tycon_l /= tycon_r
+ , Just (tycon_r, _) <- tcSplitTyConApp_maybe rhs
+ , tycon_l /= tycon_r
= upgrade tycon_l r
| otherwise = return r
@@ -1182,7 +1177,7 @@ congruenceNewtypes lhs rhs = go lhs rhs >>= \rhs' -> return (lhs,rhs')
| not (isNewTyCon new_tycon) = do
traceTR (text "(Upgrade) Not matching newtype evidence: " <>
ppr new_tycon <> text " for " <> ppr ty)
- return ty
+ return ty
| otherwise = do
traceTR (text "(Upgrade) upgraded " <> ppr ty <>
text " in presence of newtype evidence " <> ppr new_tycon)
@@ -1190,7 +1185,7 @@ congruenceNewtypes lhs rhs = go lhs rhs >>= \rhs' -> return (lhs,rhs')
let ty' = mkTyConApp new_tycon vars
UnaryRep rep_ty = repType ty'
_ <- liftTcM (unifyType ty rep_ty)
- -- assumes that reptype doesn't ^^^^ touch tyconApp args
+ -- assumes that reptype doesn't ^^^^ touch tyconApp args
return ty'
@@ -1202,7 +1197,7 @@ zonkTerm = foldTermM (TermFoldM
return (Suspension ct ty v b)
, fNewtypeWrapM = \ty dc t -> zonkRttiType ty >>= \ty' ->
return$ NewtypeWrap ty' dc t
- , fRefWrapM = \ty t -> return RefWrap `ap`
+ , fRefWrapM = \ty t -> return RefWrap `ap`
zonkRttiType ty `ap` return t
, fPrimM = (return.) . Prim })
@@ -1211,13 +1206,13 @@ zonkRttiType :: TcType -> TcM Type
-- by skolems, safely out of Meta-tyvar-land
zonkRttiType = zonkTcTypeToType (mkEmptyZonkEnv zonk_unbound_meta)
where
- zonk_unbound_meta tv
+ zonk_unbound_meta tv
= ASSERT( isTcTyVar tv )
do { tv' <- skolemiseUnboundMetaTyVar tv RuntimeUnk
- -- This is where RuntimeUnks are born:
- -- otherwise-unconstrained unification variables are
- -- turned into RuntimeUnks as they leave the
- -- typechecker's monad
+ -- This is where RuntimeUnks are born:
+ -- otherwise-unconstrained unification variables are
+ -- turned into RuntimeUnks as they leave the
+ -- typechecker's monad
; return (mkTyVarTy tv') }
--------------------------------------------------------------------------------
diff --git a/compiler/hsSyn/Convert.lhs b/compiler/hsSyn/Convert.lhs
index 9996e620f008..d722a402e07f 100644
--- a/compiler/hsSyn/Convert.lhs
+++ b/compiler/hsSyn/Convert.lhs
@@ -6,6 +6,8 @@
This module converts Template Haskell syntax into HsSyn
\begin{code}
+{-# LANGUAGE MagicHash #-}
+
module Convert( convertToHsExpr, convertToPat, convertToHsDecls,
convertToHsType,
thRdrNameGuesses ) where
@@ -22,6 +24,7 @@ import SrcLoc
import Type
import qualified Coercion ( Role(..) )
import TysWiredIn
+import TysPrim (eqPrimTyCon)
import BasicTypes as Hs
import ForeignCall
import Unique
@@ -198,13 +201,20 @@ cvtDec (ClassD ctxt cl tvs fds decs)
; unless (null adts')
(failWith $ (ptext (sLit "Default data instance declarations are not allowed:"))
$$ (Outputable.ppr adts'))
+ ; at_defs <- mapM cvt_at_def ats'
; returnL $ TyClD $
ClassDecl { tcdCtxt = cxt', tcdLName = tc', tcdTyVars = tvs'
, tcdFDs = fds', tcdSigs = sigs', tcdMeths = binds'
- , tcdATs = fams', tcdATDefs = ats', tcdDocs = []
+ , tcdATs = fams', tcdATDefs = at_defs, tcdDocs = []
, tcdFVs = placeHolderNames }
-- no docs in TH ^^
}
+ where
+ cvt_at_def :: LTyFamInstDecl RdrName -> CvtM (LTyFamDefltEqn RdrName)
+ -- Very similar to what happens in RdrHsSyn.mkClassDecl
+ cvt_at_def decl = case RdrHsSyn.mkATDefault decl of
+ Right def -> return def
+ Left (_, msg) -> failWith msg
cvtDec (InstanceD ctxt ty decs)
= do { let doc = ptext (sLit "an instance declaration")
@@ -213,7 +223,7 @@ cvtDec (InstanceD ctxt ty decs)
; ctxt' <- cvtContext ctxt
; L loc ty' <- cvtType ty
; let inst_ty' = L loc $ mkImplicitHsForAllTy ctxt' $ L loc ty'
- ; returnL $ InstD (ClsInstD (ClsInstDecl inst_ty' binds' sigs' ats' adts')) }
+ ; returnL $ InstD (ClsInstD (ClsInstDecl inst_ty' binds' sigs' ats' adts' Nothing)) }
cvtDec (ForeignD ford)
= do { ford' <- cvtForD ford
@@ -277,9 +287,9 @@ cvtTySynEqn :: Located RdrName -> TySynEqn -> CvtM (LTyFamInstEqn RdrName)
cvtTySynEqn tc (TySynEqn lhs rhs)
= do { lhs' <- mapM cvtType lhs
; rhs' <- cvtType rhs
- ; returnL $ TyFamInstEqn { tfie_tycon = tc
- , tfie_pats = mkHsWithBndrs lhs'
- , tfie_rhs = rhs' } }
+ ; returnL $ TyFamEqn { tfe_tycon = tc
+ , tfe_pats = mkHsWithBndrs lhs'
+ , tfe_rhs = rhs' } }
----------------
cvt_ci_decs :: MsgDoc -> [TH.Dec]
@@ -300,7 +310,7 @@ cvt_ci_decs doc decs
; unless (null bads) (failWith (mkBadDecMsg doc bads))
--We use FromSource as the origin of the bind
-- because the TH declaration is user-written
- ; return (listToBag (map (\bind -> (FromSource, bind)) binds'), sigs', fams', ats', adts') }
+ ; return (listToBag binds', sigs', fams', ats', adts') }
----------------
cvt_tycl_hdr :: TH.Cxt -> TH.Name -> [TH.TyVarBndr]
@@ -535,9 +545,7 @@ cvtLocalDecs doc ds
; let (binds, prob_sigs) = partitionWith is_bind ds'
; let (sigs, bads) = partitionWith is_sig prob_sigs
; unless (null bads) (failWith (mkBadDecMsg doc bads))
- ; return (HsValBinds (ValBindsIn (toBindBag binds) sigs)) }
- where
- toBindBag = listToBag . map (\bind -> (FromSource, bind))
+ ; return (HsValBinds (ValBindsIn (listToBag binds) sigs)) }
cvtClause :: TH.Clause -> CvtM (Hs.LMatch RdrName (LHsExpr RdrName))
cvtClause (Clause ps body wheres)
@@ -562,10 +570,10 @@ cvtl e = wrapL (cvt e)
cvt (AppE x y) = do { x' <- cvtl x; y' <- cvtl y; return $ HsApp x' y' }
cvt (LamE ps e) = do { ps' <- cvtPats ps; e' <- cvtl e
- ; return $ HsLam (mkMatchGroup [mkSimpleMatch ps' e']) }
+ ; return $ HsLam (mkMatchGroup FromSource [mkSimpleMatch ps' e']) }
cvt (LamCaseE ms) = do { ms' <- mapM cvtMatch ms
; return $ HsLamCase placeHolderType
- (mkMatchGroup ms')
+ (mkMatchGroup FromSource ms')
}
cvt (TupE [e]) = do { e' <- cvtl e; return $ HsPar e' }
-- Note [Dropping constructors]
@@ -581,7 +589,7 @@ cvtl e = wrapL (cvt e)
cvt (LetE ds e) = do { ds' <- cvtLocalDecs (ptext (sLit "a let expression")) ds
; e' <- cvtl e; return $ HsLet ds' e' }
cvt (CaseE e ms) = do { e' <- cvtl e; ms' <- mapM cvtMatch ms
- ; return $ HsCase e' (mkMatchGroup ms') }
+ ; return $ HsCase e' (mkMatchGroup FromSource ms') }
cvt (DoE ss) = cvtHsDo DoExpr ss
cvt (CompE ss) = cvtHsDo ListComp ss
cvt (ArithSeqE dd) = do { dd' <- cvtDD dd; return $ ArithSeq noPostTcExpr Nothing dd' }
@@ -829,8 +837,8 @@ cvtp (TH.LitP l)
| otherwise = do { l' <- cvtLit l; return $ Hs.LitPat l' }
cvtp (TH.VarP s) = do { s' <- vName s; return $ Hs.VarPat s' }
cvtp (TupP [p]) = do { p' <- cvtPat p; return $ ParPat p' } -- Note [Dropping constructors]
-cvtp (TupP ps) = do { ps' <- cvtPats ps; return $ TuplePat ps' Boxed void }
-cvtp (UnboxedTupP ps) = do { ps' <- cvtPats ps; return $ TuplePat ps' Unboxed void }
+cvtp (TupP ps) = do { ps' <- cvtPats ps; return $ TuplePat ps' Boxed [] }
+cvtp (UnboxedTupP ps) = do { ps' <- cvtPats ps; return $ TuplePat ps' Unboxed [] }
cvtp (ConP s ps) = do { s' <- cNameL s; ps' <- cvtPats ps
; return $ ConPatIn s' (PrefixCon ps') }
cvtp (InfixP p1 s p2) = do { s' <- cNameL s; p1' <- cvtPat p1; p2' <- cvtPat p2
@@ -894,16 +902,7 @@ cvtContext :: TH.Cxt -> CvtM (LHsContext RdrName)
cvtContext tys = do { preds' <- mapM cvtPred tys; returnL preds' }
cvtPred :: TH.Pred -> CvtM (LHsType RdrName)
-cvtPred (TH.ClassP cla tys)
- = do { cla' <- if isVarName cla then tName cla else tconName cla
- ; tys' <- mapM cvtType tys
- ; mk_apps (HsTyVar cla') tys'
- }
-cvtPred (TH.EqualP ty1 ty2)
- = do { ty1' <- cvtType ty1
- ; ty2' <- cvtType ty2
- ; returnL $ HsEqTy ty1' ty2'
- }
+cvtPred = cvtType
cvtType :: TH.Type -> CvtM (LHsType RdrName)
cvtType = cvtTypeKind "type"
@@ -983,6 +982,10 @@ cvtTypeKind ty_str ty
ConstraintT
-> returnL (HsTyVar (getRdrName constraintKindTyCon))
+ EqualityT
+ | [x',y'] <- tys' -> returnL (HsEqTy x' y')
+ | otherwise -> mk_apps (HsTyVar (getRdrName eqPrimTyCon)) tys'
+
_ -> failWith (ptext (sLit ("Malformed " ++ ty_str)) <+> text (show ty))
}
@@ -1112,8 +1115,10 @@ thRdrName loc ctxt_ns th_occ th_name
TH.NameQ mod -> (mkRdrQual $! mk_mod mod) $! occ
TH.NameL uniq -> nameRdrName $! (((Name.mkInternalName $! mk_uniq uniq) $! occ) loc)
TH.NameU uniq -> nameRdrName $! (((Name.mkSystemNameAt $! mk_uniq uniq) $! occ) loc)
- TH.NameS | Just name <- isBuiltInOcc ctxt_ns th_occ -> nameRdrName $! name
- | otherwise -> mkRdrUnqual $! occ
+ TH.NameS | Just name <- isBuiltInOcc_maybe occ -> nameRdrName $! name
+ | otherwise -> mkRdrUnqual $! occ
+ -- We check for built-in syntax here, because the TH
+ -- user might have written a (NameS "(,,)"), for example
where
occ :: OccName.OccName
occ = mk_occ ctxt_ns th_occ
@@ -1133,25 +1138,6 @@ thRdrNameGuesses (TH.Name occ flavour)
| otherwise = [OccName.varName, OccName.tvName]
occ_str = TH.occString occ
-isBuiltInOcc :: OccName.NameSpace -> String -> Maybe Name.Name
--- Built in syntax isn't "in scope" so an Unqual RdrName won't do
--- We must generate an Exact name, just as the parser does
-isBuiltInOcc ctxt_ns occ
- = case occ of
- ":" -> Just (Name.getName consDataCon)
- "[]" -> Just (Name.getName nilDataCon)
- "()" -> Just (tup_name 0)
- '(' : ',' : rest -> go_tuple 2 rest
- _ -> Nothing
- where
- go_tuple n ")" = Just (tup_name n)
- go_tuple n (',' : rest) = go_tuple (n+1) rest
- go_tuple _ _ = Nothing
-
- tup_name n
- | OccName.isTcClsNameSpace ctxt_ns = Name.getName (tupleTyCon BoxedTuple n)
- | otherwise = Name.getName (tupleCon BoxedTuple n)
-
-- The packing and unpacking is rather turgid :-(
mk_occ :: OccName.NameSpace -> String -> OccName.OccName
mk_occ ns occ = OccName.mkOccName ns occ
@@ -1164,8 +1150,8 @@ mk_ghc_ns TH.VarName = OccName.varName
mk_mod :: TH.ModName -> ModuleName
mk_mod mod = mkModuleName (TH.modString mod)
-mk_pkg :: TH.PkgName -> PackageId
-mk_pkg pkg = stringToPackageId (TH.pkgString pkg)
+mk_pkg :: TH.PkgName -> PackageKey
+mk_pkg pkg = stringToPackageKey (TH.pkgString pkg)
mk_uniq :: Int# -> Unique
mk_uniq u = mkUniqueGrimily (I# u)
@@ -1179,7 +1165,7 @@ Consider this TH term construction:
; x3 <- TH.newName "x"
; let x = mkName "x" -- mkName :: String -> TH.Name
- -- Builds a NameL
+ -- Builds a NameS
; return (LamE (..pattern [x1,x2]..) $
LamE (VarPat x3) $
diff --git a/compiler/hsSyn/HsBinds.lhs b/compiler/hsSyn/HsBinds.lhs
index e904633eec64..04a72225f1ac 100644
--- a/compiler/hsSyn/HsBinds.lhs
+++ b/compiler/hsSyn/HsBinds.lhs
@@ -89,7 +89,7 @@ type LHsBind id = LHsBindLR id id
type LHsBinds id = LHsBindsLR id id
type HsBind id = HsBindLR id id
-type LHsBindsLR idL idR = Bag (Origin, LHsBindLR idL idR)
+type LHsBindsLR idL idR = Bag (LHsBindLR idL idR)
type LHsBindLR idL idR = Located (HsBindLR idL idR)
data HsBindLR idL idR
@@ -166,13 +166,7 @@ data HsBindLR idL idR
abs_binds :: LHsBinds idL -- ^ Typechecked user bindings
}
- | PatSynBind {
- patsyn_id :: Located idL, -- ^ Name of the pattern synonym
- bind_fvs :: NameSet, -- ^ See Note [Bind free vars]
- patsyn_args :: HsPatSynDetails (Located idR), -- ^ Formal parameter names
- patsyn_def :: LPat idR, -- ^ Right-hand side
- patsyn_dir :: HsPatSynDir idR -- ^ Directionality
- }
+ | PatSynBind (PatSynBind idL idR)
deriving (Data, Typeable)
-- Consider (AbsBinds tvs ds [(ftvs, poly_f, mono_f) binds]
@@ -195,6 +189,14 @@ data ABExport id
, abe_prags :: TcSpecPrags -- ^ SPECIALISE pragmas
} deriving (Data, Typeable)
+data PatSynBind idL idR
+ = PSB { psb_id :: Located idL, -- ^ Name of the pattern synonym
+ psb_fvs :: NameSet, -- ^ See Note [Bind free vars]
+ psb_args :: HsPatSynDetails (Located idR), -- ^ Formal parameter names
+ psb_def :: LPat idR, -- ^ Right-hand side
+ psb_dir :: HsPatSynDir idR -- ^ Directionality
+ } deriving (Data, Typeable)
+
-- | Used for the NameSet in FunBind and PatBind prior to the renamer
placeHolderNames :: NameSet
placeHolderNames = panic "placeHolderNames"
@@ -322,7 +324,7 @@ instance (OutputableBndr idL, OutputableBndr idR) => Outputable (HsValBindsLR id
pprLHsBinds :: (OutputableBndr idL, OutputableBndr idR) => LHsBindsLR idL idR -> SDoc
pprLHsBinds binds
| isEmptyLHsBinds binds = empty
- | otherwise = pprDeclList (map (ppr . snd) (bagToList binds))
+ | otherwise = pprDeclList (map ppr (bagToList binds))
pprLHsBindsForUser :: (OutputableBndr idL, OutputableBndr idR, OutputableBndr id2)
=> LHsBindsLR idL idR -> [LSig id2] -> [SDoc]
@@ -338,7 +340,7 @@ pprLHsBindsForUser binds sigs
decls :: [(SrcSpan, SDoc)]
decls = [(loc, ppr sig) | L loc sig <- sigs] ++
- [(loc, ppr bind) | (_, L loc bind) <- bagToList binds]
+ [(loc, ppr bind) | L loc bind <- bagToList binds]
sort_by_loc decls = sortBy (comparing fst) decls
@@ -437,20 +439,7 @@ ppr_monobind (FunBind { fun_id = fun, fun_infix = inf,
$$ ifPprDebug (pprBndr LetBind (unLoc fun))
$$ pprFunBind (unLoc fun) inf matches
$$ ifPprDebug (ppr wrap)
-ppr_monobind (PatSynBind{ patsyn_id = L _ psyn, patsyn_args = details,
- patsyn_def = pat, patsyn_dir = dir })
- = ppr_lhs <+> ppr_rhs
- where
- ppr_lhs = ptext (sLit "pattern") <+> ppr_details details
- ppr_simple syntax = syntax <+> ppr pat
-
- ppr_details (InfixPatSyn v1 v2) = hsep [ppr v1, pprInfixOcc psyn, ppr v2]
- ppr_details (PrefixPatSyn vs) = hsep (pprPrefixOcc psyn : map ppr vs)
-
- ppr_rhs = case dir of
- Unidirectional -> ppr_simple (ptext (sLit "<-"))
- ImplicitBidirectional -> ppr_simple equals
-
+ppr_monobind (PatSynBind psb) = ppr psb
ppr_monobind (AbsBinds { abs_tvs = tyvars, abs_ev_vars = dictvars
, abs_exports = exports, abs_binds = val_binds
, abs_ev_binds = ev_binds })
@@ -467,6 +456,23 @@ instance (OutputableBndr id) => Outputable (ABExport id) where
= vcat [ ppr gbl <+> ptext (sLit "<=") <+> ppr lcl
, nest 2 (pprTcSpecPrags prags)
, nest 2 (ppr wrap)]
+
+instance (OutputableBndr idL, OutputableBndr idR) => Outputable (PatSynBind idL idR) where
+ ppr (PSB{ psb_id = L _ psyn, psb_args = details, psb_def = pat, psb_dir = dir })
+ = ppr_lhs <+> ppr_rhs
+ where
+ ppr_lhs = ptext (sLit "pattern") <+> ppr_details
+ ppr_simple syntax = syntax <+> ppr pat
+
+ (is_infix, ppr_details) = case details of
+ InfixPatSyn v1 v2 -> (True, hsep [ppr v1, pprInfixOcc psyn, ppr v2])
+ PrefixPatSyn vs -> (False, hsep (pprPrefixOcc psyn : map ppr vs))
+
+ ppr_rhs = case dir of
+ Unidirectional -> ppr_simple (ptext (sLit "<-"))
+ ImplicitBidirectional -> ppr_simple equals
+ ExplicitBidirectional mg -> ppr_simple (ptext (sLit "<-")) <+> ptext (sLit "where") $$
+ (nest 2 $ pprFunBind psyn is_infix mg)
\end{code}
@@ -785,10 +791,9 @@ instance Traversable HsPatSynDetails where
traverse f (InfixPatSyn left right) = InfixPatSyn <$> f left <*> f right
traverse f (PrefixPatSyn args) = PrefixPatSyn <$> traverse f args
-data HsPatSynDirLR idL idR
+data HsPatSynDir id
= Unidirectional
| ImplicitBidirectional
+ | ExplicitBidirectional (MatchGroup id (LHsExpr id))
deriving (Data, Typeable)
-
-type HsPatSynDir id = HsPatSynDirLR id id
\end{code}
diff --git a/compiler/hsSyn/HsDecls.lhs b/compiler/hsSyn/HsDecls.lhs
index bae804eb079b..9680c89e9b36 100644
--- a/compiler/hsSyn/HsDecls.lhs
+++ b/compiler/hsSyn/HsDecls.lhs
@@ -23,13 +23,14 @@ module HsDecls (
tyFamInstDeclName, tyFamInstDeclLName,
countTyClDecls, pprTyClDeclFlavour,
tyClDeclLName, tyClDeclTyVars,
+ hsDeclHasCusk, famDeclHasCusk,
FamilyDecl(..), LFamilyDecl,
-- ** Instance declarations
InstDecl(..), LInstDecl, NewOrData(..), FamilyInfo(..),
TyFamInstDecl(..), LTyFamInstDecl, instDeclDataFamInsts,
DataFamInstDecl(..), LDataFamInstDecl, pprDataFamInstFlavour,
- TyFamInstEqn(..), LTyFamInstEqn,
+ TyFamEqn(..), TyFamInstEqn, LTyFamInstEqn, TyFamDefltEqn, LTyFamDefltEqn,
LClsInstDecl, ClsInstDecl(..),
-- ** Standalone deriving declarations
@@ -93,6 +94,7 @@ import Bag
import Data.Data hiding (TyCon)
import Data.Foldable (Foldable)
import Data.Traversable
+import Data.Maybe
\end{code}
%************************************************************************
@@ -472,7 +474,7 @@ data TyClDecl name
tcdSigs :: [LSig name], -- ^ Methods' signatures
tcdMeths :: LHsBinds name, -- ^ Default methods
tcdATs :: [LFamilyDecl name], -- ^ Associated types; ie
- tcdATDefs :: [LTyFamInstDecl name], -- ^ Associated type defaults
+ tcdATDefs :: [LTyFamDefltEqn name], -- ^ Associated type defaults
tcdDocs :: [LDocDecl], -- ^ Haddock docs
tcdFVs :: NameSet
}
@@ -573,7 +575,7 @@ tyFamInstDeclName = unLoc . tyFamInstDeclLName
tyFamInstDeclLName :: OutputableBndr name
=> TyFamInstDecl name -> Located name
tyFamInstDeclLName (TyFamInstDecl { tfid_eqn =
- (L _ (TyFamInstEqn { tfie_tycon = ln })) })
+ (L _ (TyFamEqn { tfe_tycon = ln })) })
= ln
tyClDeclLName :: TyClDecl name -> Located name
@@ -604,8 +606,54 @@ countTyClDecls decls
isNewTy DataDecl{ tcdDataDefn = HsDataDefn { dd_ND = NewType } } = True
isNewTy _ = False
+
+-- | Does this declaration have a complete, user-supplied kind signature?
+-- See Note [Complete user-supplied kind signatures]
+hsDeclHasCusk :: TyClDecl name -> Bool
+hsDeclHasCusk (ForeignType {}) = True
+hsDeclHasCusk (FamDecl { tcdFam = fam_decl }) = famDeclHasCusk fam_decl
+hsDeclHasCusk (SynDecl { tcdTyVars = tyvars, tcdRhs = rhs })
+ = hsTvbAllKinded tyvars && rhs_annotated rhs
+ where
+ rhs_annotated (L _ ty) = case ty of
+ HsParTy lty -> rhs_annotated lty
+ HsKindSig {} -> True
+ _ -> False
+hsDeclHasCusk (DataDecl { tcdTyVars = tyvars }) = hsTvbAllKinded tyvars
+hsDeclHasCusk (ClassDecl { tcdTyVars = tyvars }) = hsTvbAllKinded tyvars
+
+-- | Does this family declaration have a complete, user-supplied kind signature?
+famDeclHasCusk :: FamilyDecl name -> Bool
+famDeclHasCusk (FamilyDecl { fdInfo = ClosedTypeFamily _
+ , fdTyVars = tyvars
+ , fdKindSig = m_sig })
+ = hsTvbAllKinded tyvars && isJust m_sig
+famDeclHasCusk _ = True -- all open families have CUSKs!
\end{code}
+Note [Complete user-supplied kind signatures]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We kind-check declarations differently if they have a complete, user-supplied
+kind signature (CUSK). This is because we can safely generalise a CUSKed
+declaration before checking all of the others, supporting polymorphic recursion.
+See https://ghc.haskell.org/trac/ghc/wiki/GhcKinds/KindInference#Proposednewstrategy
+and #9200 for lots of discussion of how we got here.
+
+A declaration has a CUSK if we can know its complete kind without doing any inference,
+at all. Here are the rules:
+
+ - A class or datatype is said to have a CUSK if and only if all of its type
+variables are annotated. Its result kind is, by construction, Constraint or *
+respectively.
+
+ - A type synonym has a CUSK if and only if all of its type variables and its
+RHS are annotated with kinds.
+
+ - A closed type family is said to have a CUSK if and only if all of its type
+variables and its return type are annotated.
+
+ - An open type family always has a CUSK -- unannotated type variables (and return type) default to *.
+
\begin{code}
instance OutputableBndr name
=> Outputable (TyClDecl name) where
@@ -632,7 +680,7 @@ instance OutputableBndr name
| otherwise -- Laid out
= vcat [ top_matter <+> ptext (sLit "where")
, nest 2 $ pprDeclList (map ppr ats ++
- map ppr at_defs ++
+ map ppr_fam_deflt_eqn at_defs ++
pprLHsBindsForUser methods sigs) ]
where
top_matter = ptext (sLit "class")
@@ -657,7 +705,7 @@ instance (OutputableBndr name) => Outputable (FamilyDecl name) where
ClosedTypeFamily eqns -> ( ptext (sLit "where")
, if null eqns
then ptext (sLit "..")
- else vcat $ map ppr eqns )
+ else vcat $ map ppr_fam_inst_eqn eqns )
_ -> (empty, empty)
pprFlavour :: FamilyInfo name -> SDoc
@@ -678,7 +726,7 @@ pp_vanilla_decl_head thing tyvars context
pp_fam_inst_lhs :: OutputableBndr name
=> Located name
- -> HsWithBndrs [LHsType name]
+ -> HsTyPats name
-> HsContext name
-> SDoc
pp_fam_inst_lhs thing (HsWB { hswb_cts = typats }) context -- explicit type patterns
@@ -686,12 +734,13 @@ pp_fam_inst_lhs thing (HsWB { hswb_cts = typats }) context -- explicit type patt
, hsep (map (pprParendHsType.unLoc) typats)]
pprTyClDeclFlavour :: TyClDecl a -> SDoc
-pprTyClDeclFlavour (ClassDecl {}) = ptext (sLit "class")
-pprTyClDeclFlavour (FamDecl {}) = ptext (sLit "family")
-pprTyClDeclFlavour (SynDecl {}) = ptext (sLit "type")
-pprTyClDeclFlavour (DataDecl { tcdDataDefn = (HsDataDefn { dd_ND = nd }) })
- = ppr nd
+pprTyClDeclFlavour (ClassDecl {}) = ptext (sLit "class")
+pprTyClDeclFlavour (SynDecl {}) = ptext (sLit "type")
pprTyClDeclFlavour (ForeignType {}) = ptext (sLit "foreign type")
+pprTyClDeclFlavour (FamDecl { tcdFam = FamilyDecl { fdInfo = info }})
+ = pprFlavour info
+pprTyClDeclFlavour (DataDecl { tcdDataDefn = HsDataDefn { dd_ND = nd } })
+ = ppr nd
\end{code}
%************************************************************************
@@ -893,25 +942,49 @@ pprConDecl decl@(ConDecl { con_details = InfixCon ty1 ty2, con_res = ResTyGADT {
%* *
%************************************************************************
+Note [Type family instance declarations in HsSyn]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The data type TyFamEqn represents one equation of a type family instance.
+It is parameterised over its tfe_pats field:
+
+ * An ordinary type family instance declaration looks like this in source Haskell
+ type instance T [a] Int = a -> a
+ (or something similar for a closed family)
+ It is represented by a TyFamInstEqn, with *type* in the tfe_pats field.
+
+ * On the other hand, the *default instance* of an associated type looks like
+ this in source Haskell
+ class C a where
+ type T a b
+ type T a b = a -> b -- The default instance
+ It is represented by a TyFamDefltEqn, with *type variables* in the tfe_pats field.
+
\begin{code}
----------------- Type synonym family instances -------------
+type LTyFamInstEqn name = Located (TyFamInstEqn name)
+type LTyFamDefltEqn name = Located (TyFamDefltEqn name)
-type LTyFamInstEqn name = Located (TyFamInstEqn name)
-
--- | One equation in a type family instance declaration
-data TyFamInstEqn name
- = TyFamInstEqn
- { tfie_tycon :: Located name
- , tfie_pats :: HsWithBndrs [LHsType name]
+type HsTyPats name = HsWithBndrs [LHsType name]
-- ^ Type patterns (with kind and type bndrs)
-- See Note [Family instance declaration binders]
- , tfie_rhs :: LHsType name }
+
+type TyFamInstEqn name = TyFamEqn name (HsTyPats name)
+type TyFamDefltEqn name = TyFamEqn name (LHsTyVarBndrs name)
+ -- See Note [Type family instance declarations in HsSyn]
+
+-- | One equation in a type family instance declaration
+-- See Note [Type family instance declarations in HsSyn]
+data TyFamEqn name pats
+ = TyFamEqn
+ { tfe_tycon :: Located name
+ , tfe_pats :: pats
+ , tfe_rhs :: LHsType name }
deriving( Typeable, Data )
type LTyFamInstDecl name = Located (TyFamInstDecl name)
-data TyFamInstDecl name
+data TyFamInstDecl name
= TyFamInstDecl
- { tfid_eqn :: LTyFamInstEqn name
+ { tfid_eqn :: LTyFamInstEqn name
, tfid_fvs :: NameSet }
deriving( Typeable, Data )
@@ -921,11 +994,9 @@ type LDataFamInstDecl name = Located (DataFamInstDecl name)
data DataFamInstDecl name
= DataFamInstDecl
{ dfid_tycon :: Located name
- , dfid_pats :: HsWithBndrs [LHsType name] -- lhs
- -- ^ Type patterns (with kind and type bndrs)
- -- See Note [Family instance declaration binders]
- , dfid_defn :: HsDataDefn name -- rhs
- , dfid_fvs :: NameSet } -- free vars for dependency analysis
+ , dfid_pats :: HsTyPats name -- LHS
+ , dfid_defn :: HsDataDefn name -- RHS
+ , dfid_fvs :: NameSet } -- Free vars for dependency analysis
deriving( Typeable, Data )
@@ -937,10 +1008,11 @@ data ClsInstDecl name
{ cid_poly_ty :: LHsType name -- Context => Class Instance-type
-- Using a polytype means that the renamer conveniently
-- figures out the quantified type variables for us.
- , cid_binds :: LHsBinds name
- , cid_sigs :: [LSig name] -- User-supplied pragmatic info
- , cid_tyfam_insts :: [LTyFamInstDecl name] -- type family instances
- , cid_datafam_insts :: [LDataFamInstDecl name] -- data family instances
+ , cid_binds :: LHsBinds name -- Class methods
+ , cid_sigs :: [LSig name] -- User-supplied pragmatic info
+ , cid_tyfam_insts :: [LTyFamInstDecl name] -- Type family instances
+ , cid_datafam_insts :: [LDataFamInstDecl name] -- Data family instances
+ , cid_overlap_mode :: Maybe OverlapMode
}
deriving (Data, Typeable)
@@ -983,17 +1055,23 @@ instance (OutputableBndr name) => Outputable (TyFamInstDecl name) where
pprTyFamInstDecl :: OutputableBndr name => TopLevelFlag -> TyFamInstDecl name -> SDoc
pprTyFamInstDecl top_lvl (TyFamInstDecl { tfid_eqn = eqn })
- = ptext (sLit "type") <+> ppr_instance_keyword top_lvl <+> (ppr eqn)
+ = ptext (sLit "type") <+> ppr_instance_keyword top_lvl <+> ppr_fam_inst_eqn eqn
ppr_instance_keyword :: TopLevelFlag -> SDoc
ppr_instance_keyword TopLevel = ptext (sLit "instance")
ppr_instance_keyword NotTopLevel = empty
-instance (OutputableBndr name) => Outputable (TyFamInstEqn name) where
- ppr (TyFamInstEqn { tfie_tycon = tycon
- , tfie_pats = pats
- , tfie_rhs = rhs })
- = (pp_fam_inst_lhs tycon pats []) <+> equals <+> (ppr rhs)
+ppr_fam_inst_eqn :: OutputableBndr name => LTyFamInstEqn name -> SDoc
+ppr_fam_inst_eqn (L _ (TyFamEqn { tfe_tycon = tycon
+ , tfe_pats = pats
+ , tfe_rhs = rhs }))
+ = pp_fam_inst_lhs tycon pats [] <+> equals <+> ppr rhs
+
+ppr_fam_deflt_eqn :: OutputableBndr name => LTyFamDefltEqn name -> SDoc
+ppr_fam_deflt_eqn (L _ (TyFamEqn { tfe_tycon = tycon
+ , tfe_pats = tvs
+ , tfe_rhs = rhs }))
+ = pp_vanilla_decl_head tycon tvs [] <+> equals <+> ppr rhs
instance (OutputableBndr name) => Outputable (DataFamInstDecl name) where
ppr = pprDataFamInstDecl TopLevel
@@ -1013,6 +1091,7 @@ pprDataFamInstFlavour (DataFamInstDecl { dfid_defn = (HsDataDefn { dd_ND = nd })
instance (OutputableBndr name) => Outputable (ClsInstDecl name) where
ppr (ClsInstDecl { cid_poly_ty = inst_ty, cid_binds = binds
, cid_sigs = sigs, cid_tyfam_insts = ats
+ , cid_overlap_mode = mbOverlap
, cid_datafam_insts = adts })
| null sigs, null ats, null adts, isEmptyBag binds -- No "where" part
= top_matter
@@ -1024,7 +1103,21 @@ instance (OutputableBndr name) => Outputable (ClsInstDecl name) where
map (pprDataFamInstDecl NotTopLevel . unLoc) adts ++
pprLHsBindsForUser binds sigs ]
where
- top_matter = ptext (sLit "instance") <+> ppr inst_ty
+ top_matter = ptext (sLit "instance") <+> ppOverlapPragma mbOverlap
+ <+> ppr inst_ty
+
+ppOverlapPragma :: Maybe OverlapMode -> SDoc
+ppOverlapPragma mb =
+ case mb of
+ Nothing -> empty
+ Just NoOverlap -> ptext (sLit "{-# NO_OVERLAP #-}")
+ Just Overlappable -> ptext (sLit "{-# OVERLAPPABLE #-}")
+ Just Overlapping -> ptext (sLit "{-# OVERLAPPING #-}")
+ Just Overlaps -> ptext (sLit "{-# OVERLAPS #-}")
+ Just Incoherent -> ptext (sLit "{-# INCOHERENT #-}")
+
+
+
instance (OutputableBndr name) => Outputable (InstDecl name) where
ppr (ClsInstD { cid_inst = decl }) = ppr decl
@@ -1052,12 +1145,14 @@ instDeclDataFamInsts inst_decls
\begin{code}
type LDerivDecl name = Located (DerivDecl name)
-data DerivDecl name = DerivDecl { deriv_type :: LHsType name }
+data DerivDecl name = DerivDecl { deriv_type :: LHsType name
+ , deriv_overlap_mode :: Maybe OverlapMode
+ }
deriving (Data, Typeable)
instance (OutputableBndr name) => Outputable (DerivDecl name) where
- ppr (DerivDecl ty)
- = hsep [ptext (sLit "deriving instance"), ppr ty]
+ ppr (DerivDecl ty o)
+ = hsep [ptext (sLit "deriving instance"), ppOverlapPragma o, ppr ty]
\end{code}
%************************************************************************
@@ -1236,7 +1331,7 @@ instance OutputableBndr name => Outputable (RuleDecl name) where
nest 4 (equals <+> pprExpr (unLoc rhs) <+> text "#-}") ]
where
pp_forall | null ns = empty
- | otherwise = text "forall" <+> fsep (map ppr ns) <> dot
+ | otherwise = forAllLit <+> fsep (map ppr ns) <> dot
instance OutputableBndr name => Outputable (RuleBndr name) where
ppr (RuleBndr name) = ppr name
diff --git a/compiler/hsSyn/HsDoc.hs b/compiler/hsSyn/HsDoc.hs
index 1f3adafec1b1..72bf0e56a44f 100644
--- a/compiler/hsSyn/HsDoc.hs
+++ b/compiler/hsSyn/HsDoc.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE DeriveDataTypeable #-}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
module HsDoc (
HsDocString(..),
@@ -20,7 +20,7 @@ newtype HsDocString = HsDocString FastString
type LHsDocString = Located HsDocString
instance Outputable HsDocString where
- ppr _ = text ""
+ ppr (HsDocString fs) = ftext fs
ppr_mbDoc :: Maybe LHsDocString -> SDoc
ppr_mbDoc (Just doc) = ppr doc
diff --git a/compiler/hsSyn/HsExpr.lhs b/compiler/hsSyn/HsExpr.lhs
index 4c0c955cdd93..69b6df64ec21 100644
--- a/compiler/hsSyn/HsExpr.lhs
+++ b/compiler/hsSyn/HsExpr.lhs
@@ -3,7 +3,7 @@
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
%
\begin{code}
-{-# LANGUAGE DeriveDataTypeable, ScopedTypeVariables #-}
+{-# LANGUAGE CPP, DeriveDataTypeable, ScopedTypeVariables #-}
-- | Abstract Haskell syntax for expressions.
module HsExpr where
@@ -79,8 +79,6 @@ noSyntaxExpr = HsLit (HsString (fsLit "noSyntaxExpr"))
type CmdSyntaxTable id = [(Name, SyntaxExpr id)]
-- See Note [CmdSyntaxTable]
-noSyntaxTable :: CmdSyntaxTable id
-noSyntaxTable = []
\end{code}
Note [CmdSyntaxtable]
@@ -88,7 +86,7 @@ Note [CmdSyntaxtable]
Used only for arrow-syntax stuff (HsCmdTop), the CmdSyntaxTable keeps
track of the methods needed for a Cmd.
-* Before the renamer, this list is 'noSyntaxTable'
+* Before the renamer, this list is an empty list
* After the renamer, it takes the form @[(std_name, HsVar actual_name)]@
For example, for the 'arr' method
@@ -630,13 +628,13 @@ ppr_expr (HsTickPragma externalSrcLoc exp)
ptext (sLit ")")]
ppr_expr (HsArrApp arrow arg _ HsFirstOrderApp True)
- = hsep [ppr_lexpr arrow, ptext (sLit "-<"), ppr_lexpr arg]
+ = hsep [ppr_lexpr arrow, larrowt, ppr_lexpr arg]
ppr_expr (HsArrApp arrow arg _ HsFirstOrderApp False)
- = hsep [ppr_lexpr arg, ptext (sLit ">-"), ppr_lexpr arrow]
+ = hsep [ppr_lexpr arg, arrowt, ppr_lexpr arrow]
ppr_expr (HsArrApp arrow arg _ HsHigherOrderApp True)
- = hsep [ppr_lexpr arrow, ptext (sLit "-<<"), ppr_lexpr arg]
+ = hsep [ppr_lexpr arrow, larrowtt, ppr_lexpr arg]
ppr_expr (HsArrApp arrow arg _ HsHigherOrderApp False)
- = hsep [ppr_lexpr arg, ptext (sLit ">>-"), ppr_lexpr arrow]
+ = hsep [ppr_lexpr arg, arrowtt, ppr_lexpr arrow]
ppr_expr (HsArrForm (L _ (HsVar v)) (Just _) [arg1, arg2])
= sep [pprCmdArg (unLoc arg1), hsep [pprInfixOcc v, pprCmdArg (unLoc arg2)]]
@@ -849,13 +847,13 @@ ppr_cmd (HsCmdCast co cmd) = sep [ ppr_cmd cmd
, ptext (sLit "|>") <+> ppr co ]
ppr_cmd (HsCmdArrApp arrow arg _ HsFirstOrderApp True)
- = hsep [ppr_lexpr arrow, ptext (sLit "-<"), ppr_lexpr arg]
+ = hsep [ppr_lexpr arrow, larrowt, ppr_lexpr arg]
ppr_cmd (HsCmdArrApp arrow arg _ HsFirstOrderApp False)
- = hsep [ppr_lexpr arg, ptext (sLit ">-"), ppr_lexpr arrow]
+ = hsep [ppr_lexpr arg, arrowt, ppr_lexpr arrow]
ppr_cmd (HsCmdArrApp arrow arg _ HsHigherOrderApp True)
- = hsep [ppr_lexpr arrow, ptext (sLit "-<<"), ppr_lexpr arg]
+ = hsep [ppr_lexpr arrow, larrowtt, ppr_lexpr arg]
ppr_cmd (HsCmdArrApp arrow arg _ HsHigherOrderApp False)
- = hsep [ppr_lexpr arg, ptext (sLit ">>-"), ppr_lexpr arrow]
+ = hsep [ppr_lexpr arg, arrowtt, ppr_lexpr arrow]
ppr_cmd (HsCmdArrForm (L _ (HsVar v)) (Just _) [arg1, arg2])
= sep [pprCmdArg (unLoc arg1), hsep [pprInfixOcc v, pprCmdArg (unLoc arg2)]]
@@ -909,7 +907,8 @@ patterns in each equation.
data MatchGroup id body
= MG { mg_alts :: [LMatch id body] -- The alternatives
, mg_arg_tys :: [PostTcType] -- Types of the arguments, t1..tn
- , mg_res_ty :: PostTcType } -- Type of the result, tr
+ , mg_res_ty :: PostTcType -- Type of the result, tr
+ , mg_origin :: Origin }
-- The type is the type of the entire group
-- t1 -> ... -> tn -> tr
-- where there are n patterns
@@ -1299,7 +1298,7 @@ instance (OutputableBndr idL, OutputableBndr idR, Outputable body)
pprStmt :: (OutputableBndr idL, OutputableBndr idR, Outputable body)
=> (StmtLR idL idR body) -> SDoc
pprStmt (LastStmt expr _) = ifPprDebug (ptext (sLit "[last]")) <+> ppr expr
-pprStmt (BindStmt pat expr _ _) = hsep [ppr pat, ptext (sLit "<-"), ppr expr]
+pprStmt (BindStmt pat expr _ _) = hsep [ppr pat, larrow, ppr expr]
pprStmt (LetStmt binds) = hsep [ptext (sLit "let"), pprBinds binds]
pprStmt (BodyStmt expr _ _ _) = ppr expr
pprStmt (ParStmt stmtss _ _) = sep (punctuate (ptext (sLit " | ")) (map ppr stmtss))
diff --git a/compiler/hsSyn/HsLit.lhs b/compiler/hsSyn/HsLit.lhs
index 9565acbc8f89..a4749dd730a7 100644
--- a/compiler/hsSyn/HsLit.lhs
+++ b/compiler/hsSyn/HsLit.lhs
@@ -5,14 +5,14 @@
\section[HsLit]{Abstract syntax: source-language literals}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
-- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
-- for details
-{-# LANGUAGE DeriveDataTypeable #-}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
module HsLit where
diff --git a/compiler/hsSyn/HsPat.lhs b/compiler/hsSyn/HsPat.lhs
index 9d458b79c4b3..4b8fcdaae73b 100644
--- a/compiler/hsSyn/HsPat.lhs
+++ b/compiler/hsSyn/HsPat.lhs
@@ -16,8 +16,8 @@ module HsPat (
mkPrefixConPat, mkCharLitPat, mkNilPat,
- isBangHsBind, isLiftedPatBind,
- isBangLPat, hsPatNeedsParens,
+ isStrictHsBind, looksLazyPatBind,
+ isStrictLPat, hsPatNeedsParens,
isIrrefutableHsPat,
pprParendLPat
@@ -75,10 +75,13 @@ data Pat id
-- overall type of the pattern, and the toList
-- function to convert the scrutinee to a list value
- | TuplePat [LPat id] -- Tuple
- Boxity -- UnitPat is TuplePat []
- PostTcType
- -- You might think that the PostTcType was redundant, but it's essential
+ | TuplePat [LPat id] -- Tuple sub-patterns
+ Boxity -- UnitPat is TuplePat []
+ [PostTcType] -- [] before typechecker, filled in afterwards with
+ -- the types of the tuple components
+ -- You might think that the PostTcType was redundant, because we can
+ -- get the pattern type by getting the types of the sub-patterns.
+ -- But it's essential
-- data T a where
-- T1 :: Int -> T Int
-- f :: (T a, a) -> Int
@@ -89,6 +92,8 @@ data Pat id
-- Note the (w::a), NOT (w::Int), because we have not yet
-- refined 'a' to Int. So we must know that the second component
-- of the tuple is of type 'a' not Int. See selectMatchVar
+ -- (June 14: I'm not sure this comment is right; the sub-patterns
+ -- will be wrapped in CoPats, no?)
| PArrPat [LPat id] -- Syntactic parallel array
PostTcType -- The type of the elements
@@ -98,14 +103,18 @@ data Pat id
(HsConPatDetails id)
| ConPatOut {
- pat_con :: Located ConLike,
+ pat_con :: Located ConLike,
+ pat_arg_tys :: [Type], -- The universal arg types, 1-1 with the universal
+ -- tyvars of the constructor/pattern synonym
+ -- Use (conLikeResTy pat_con pat_arg_tys) to get
+ -- the type of the pattern
+
pat_tvs :: [TyVar], -- Existentially bound type variables (tyvars only)
pat_dicts :: [EvVar], -- Ditto *coercion variables* and *dictionaries*
-- One reason for putting coercion variable here, I think,
-- is to ensure their kinds are zonked
pat_binds :: TcEvBinds, -- Bindings involving those dictionaries
pat_args :: HsConPatDetails id,
- pat_ty :: Type, -- The type of the pattern
pat_wrap :: HsWrapper -- Extra wrapper to pass to the matcher
}
@@ -313,18 +322,18 @@ instance (OutputableBndr id, Outputable arg)
%************************************************************************
\begin{code}
-mkPrefixConPat :: DataCon -> [OutPat id] -> Type -> OutPat id
+mkPrefixConPat :: DataCon -> [OutPat id] -> [Type] -> OutPat id
-- Make a vanilla Prefix constructor pattern
-mkPrefixConPat dc pats ty
+mkPrefixConPat dc pats tys
= noLoc $ ConPatOut { pat_con = noLoc (RealDataCon dc), pat_tvs = [], pat_dicts = [],
pat_binds = emptyTcEvBinds, pat_args = PrefixCon pats,
- pat_ty = ty, pat_wrap = idHsWrapper }
+ pat_arg_tys = tys, pat_wrap = idHsWrapper }
mkNilPat :: Type -> OutPat id
-mkNilPat ty = mkPrefixConPat nilDataCon [] ty
+mkNilPat ty = mkPrefixConPat nilDataCon [] [ty]
mkCharLitPat :: Char -> OutPat id
-mkCharLitPat c = mkPrefixConPat charDataCon [noLoc $ LitPat (HsCharPrim c)] charTy
+mkCharLitPat c = mkPrefixConPat charDataCon [noLoc $ LitPat (HsCharPrim c)] []
\end{code}
@@ -358,34 +367,34 @@ patterns are treated specially, of course.
The 1.3 report defines what ``irrefutable'' and ``failure-free'' patterns are.
\begin{code}
-isBangLPat :: LPat id -> Bool
-isBangLPat (L _ (BangPat {})) = True
-isBangLPat (L _ (ParPat p)) = isBangLPat p
-isBangLPat _ = False
-
-isBangHsBind :: HsBind id -> Bool
--- A pattern binding with an outermost bang
+isStrictLPat :: LPat id -> Bool
+isStrictLPat (L _ (ParPat p)) = isStrictLPat p
+isStrictLPat (L _ (BangPat {})) = True
+isStrictLPat (L _ (TuplePat _ Unboxed _)) = True
+isStrictLPat _ = False
+
+isStrictHsBind :: HsBind id -> Bool
+-- A pattern binding with an outermost bang or unboxed tuple must be matched strictly
-- Defined in this module because HsPat is above HsBinds in the import graph
-isBangHsBind (PatBind { pat_lhs = p }) = isBangLPat p
-isBangHsBind _ = False
-
-isLiftedPatBind :: HsBind id -> Bool
--- A pattern binding with a compound pattern, not just a variable
--- (I# x) yes
--- (# a, b #) no, even if a::Int#
--- x no, even if x::Int#
--- We want to warn about a missing bang-pattern on the yes's
-isLiftedPatBind (PatBind { pat_lhs = p }) = isLiftedLPat p
-isLiftedPatBind _ = False
-
-isLiftedLPat :: LPat id -> Bool
-isLiftedLPat (L _ (ParPat p)) = isLiftedLPat p
-isLiftedLPat (L _ (BangPat p)) = isLiftedLPat p
-isLiftedLPat (L _ (AsPat _ p)) = isLiftedLPat p
-isLiftedLPat (L _ (TuplePat _ Unboxed _)) = False
-isLiftedLPat (L _ (VarPat {})) = False
-isLiftedLPat (L _ (WildPat {})) = False
-isLiftedLPat _ = True
+isStrictHsBind (PatBind { pat_lhs = p }) = isStrictLPat p
+isStrictHsBind _ = False
+
+looksLazyPatBind :: HsBind id -> Bool
+-- Returns True of anything *except*
+-- a StrictHsBind (as above) or
+-- a VarPat
+-- In particular, returns True of a pattern binding with a compound pattern, like (I# x)
+looksLazyPatBind (PatBind { pat_lhs = p }) = looksLazyLPat p
+looksLazyPatBind _ = False
+
+looksLazyLPat :: LPat id -> Bool
+looksLazyLPat (L _ (ParPat p)) = looksLazyLPat p
+looksLazyLPat (L _ (AsPat _ p)) = looksLazyLPat p
+looksLazyLPat (L _ (BangPat {})) = False
+looksLazyLPat (L _ (TuplePat _ Unboxed _)) = False
+looksLazyLPat (L _ (VarPat {})) = False
+looksLazyLPat (L _ (WildPat {})) = False
+looksLazyLPat _ = True
isIrrefutableHsPat :: OutputableBndr id => LPat id -> Bool
-- (isIrrefutableHsPat p) is true if matching against p cannot fail,
diff --git a/compiler/hsSyn/HsSyn.lhs b/compiler/hsSyn/HsSyn.lhs
index e9c3a5eeee37..72cbac14874b 100644
--- a/compiler/hsSyn/HsSyn.lhs
+++ b/compiler/hsSyn/HsSyn.lhs
@@ -23,7 +23,7 @@ module HsSyn (
module HsDoc,
Fixity,
- HsModule(..), HsExtCore(..),
+ HsModule(..)
) where
-- friends:
@@ -40,10 +40,9 @@ import HsDoc
-- others:
import OccName ( HasOccName )
-import IfaceSyn ( IfaceBinding )
import Outputable
import SrcLoc
-import Module ( Module, ModuleName )
+import Module ( ModuleName )
import FastString
-- libraries:
@@ -77,13 +76,6 @@ data HsModule name
hsmodHaddockModHeader :: Maybe LHsDocString
-- ^ Haddock module info and description, unparsed
} deriving (Data, Typeable)
-
-data HsExtCore name -- Read from Foo.hcr
- = HsExtCore
- Module
- [TyClDecl name] -- Type declarations only; just as in Haskell source,
- -- so that we can infer kinds etc
- [IfaceBinding] -- And the bindings
\end{code}
diff --git a/compiler/hsSyn/HsTypes.lhs b/compiler/hsSyn/HsTypes.lhs
index 28c6a2b89c73..0cf8455bad4c 100644
--- a/compiler/hsSyn/HsTypes.lhs
+++ b/compiler/hsSyn/HsTypes.lhs
@@ -25,7 +25,7 @@ module HsTypes (
ConDeclField(..), pprConDeclFields,
- mkHsQTvs, hsQTvBndrs,
+ mkHsQTvs, hsQTvBndrs, isHsKindedTyVar, hsTvbAllKinded,
mkExplicitHsForAllTy, mkImplicitHsForAllTy, hsExplicitTvs,
hsTyVarName, mkHsWithBndrs, hsLKiTyVarNames,
hsLTyVarName, hsLTyVarNames, hsLTyVarLocName, hsLTyVarLocNames,
@@ -35,7 +35,7 @@ module HsTypes (
splitHsAppTys, hsTyGetAppHead_maybe, mkHsAppTys, mkHsOpTy,
-- Printing
- pprParendHsType, pprHsForAll, pprHsContext, pprHsContextNoArrow, ppr_hs_context,
+ pprParendHsType, pprHsForAll, pprHsContext, pprHsContextNoArrow,
) where
import {-# SOURCE #-} HsExpr ( HsSplice, pprUntypedSplice )
@@ -45,6 +45,7 @@ import HsLit
import Name( Name )
import RdrName( RdrName )
import DataCon( HsBang(..) )
+import TysPrim( funTyConName )
import Type
import HsDoc
import BasicTypes
@@ -162,7 +163,7 @@ mkHsWithBndrs x = HsWB { hswb_cts = x, hswb_kvs = panic "mkHsTyWithBndrs:kvs"
, hswb_tvs = panic "mkHsTyWithBndrs:tvs" }
--- | These names are used eary on to store the names of implicit
+-- | These names are used early on to store the names of implicit
-- parameters. They completely disappear after type-checking.
newtype HsIPName = HsIPName FastString-- ?x
deriving( Eq, Data, Typeable )
@@ -187,6 +188,15 @@ data HsTyVarBndr name
(LHsKind name) -- The user-supplied kind signature
deriving (Data, Typeable)
+-- | Does this 'HsTyVarBndr' come with an explicit kind annotation?
+isHsKindedTyVar :: HsTyVarBndr name -> Bool
+isHsKindedTyVar (UserTyVar {}) = False
+isHsKindedTyVar (KindedTyVar {}) = True
+
+-- | Do all type variables in this 'LHsTyVarBndr' come with kind annotations?
+hsTvbAllKinded :: LHsTyVarBndrs name -> Bool
+hsTvbAllKinded = all (isHsKindedTyVar . unLoc) . hsQTvBndrs
+
data HsType name
= HsForAllTy HsExplicitFlag -- Renamer leaves this flag unchanged, to record the way
-- the user wrote it originally, so that the printer can
@@ -506,15 +516,31 @@ splitLHsClassTy_maybe ty
HsKindSig ty _ -> checkl ty args
_ -> Nothing
--- Splits HsType into the (init, last) parts
+-- splitHsFunType decomposes a type (t1 -> t2 ... -> tn)
-- Breaks up any parens in the result type:
-- splitHsFunType (a -> (b -> c)) = ([a,b], c)
-splitHsFunType :: LHsType name -> ([LHsType name], LHsType name)
-splitHsFunType (L _ (HsFunTy x y)) = (x:args, res)
- where
- (args, res) = splitHsFunType y
-splitHsFunType (L _ (HsParTy ty)) = splitHsFunType ty
-splitHsFunType other = ([], other)
+-- Also deals with (->) t1 t2; that is why it only works on LHsType Name
+-- (see Trac #9096)
+splitHsFunType :: LHsType Name -> ([LHsType Name], LHsType Name)
+splitHsFunType (L _ (HsParTy ty))
+ = splitHsFunType ty
+
+splitHsFunType (L _ (HsFunTy x y))
+ | (args, res) <- splitHsFunType y
+ = (x:args, res)
+
+splitHsFunType orig_ty@(L _ (HsAppTy t1 t2))
+ = go t1 [t2]
+ where -- Look for (->) t1 t2, possibly with parenthesisation
+ go (L _ (HsTyVar fn)) tys | fn == funTyConName
+ , [t1,t2] <- tys
+ , (args, res) <- splitHsFunType t2
+ = (t1:args, res)
+ go (L _ (HsAppTy t1 t2)) tys = go t1 (t2:tys)
+ go (L _ (HsParTy ty)) tys = go ty tys
+ go _ _ = ([], orig_ty) -- Failure to match
+
+splitHsFunType other = ([], other)
\end{code}
@@ -550,7 +576,7 @@ pprHsForAll exp qtvs cxt
show_forall = opt_PprStyle_Debug
|| (not (null (hsQTvBndrs qtvs)) && is_explicit)
is_explicit = case exp of {Explicit -> True; Implicit -> False}
- forall_part = ptext (sLit "forall") <+> ppr qtvs <> dot
+ forall_part = forAllLit <+> ppr qtvs <> dot
pprHsContext :: (OutputableBndr name) => HsContext name -> SDoc
pprHsContext [] = empty
@@ -558,12 +584,8 @@ pprHsContext cxt = pprHsContextNoArrow cxt <+> darrow
pprHsContextNoArrow :: (OutputableBndr name) => HsContext name -> SDoc
pprHsContextNoArrow [] = empty
-pprHsContextNoArrow [L _ pred] = ppr pred
-pprHsContextNoArrow cxt = ppr_hs_context cxt
-
-ppr_hs_context :: (OutputableBndr name) => HsContext name -> SDoc
-ppr_hs_context [] = empty
-ppr_hs_context cxt = parens (interpp'SP cxt)
+pprHsContextNoArrow [L _ pred] = ppr_mono_ty FunPrec pred
+pprHsContextNoArrow cxt = parens (interpp'SP cxt)
pprConDeclFields :: OutputableBndr name => [ConDeclField name] -> SDoc
pprConDeclFields fields = braces (sep (punctuate comma (map ppr_fld fields)))
@@ -585,27 +607,12 @@ and the problem doesn't show up; but having the flag on a KindedTyVar
seems like the Right Thing anyway.)
\begin{code}
-pREC_TOP, pREC_FUN, pREC_OP, pREC_CON :: Int
-pREC_TOP = 0 -- type in ParseIface.y
-pREC_FUN = 1 -- btype in ParseIface.y
- -- Used for LH arg of (->)
-pREC_OP = 2 -- Used for arg of any infix operator
- -- (we don't keep their fixities around)
-pREC_CON = 3 -- Used for arg of type applicn:
- -- always parenthesise unless atomic
-
-maybeParen :: Int -- Precedence of context
- -> Int -- Precedence of top-level operator
- -> SDoc -> SDoc -- Wrap in parens if (ctxt >= op)
-maybeParen ctxt_prec op_prec p | ctxt_prec >= op_prec = parens p
- | otherwise = p
-
--- printing works more-or-less as for Types
+-- Printing works more-or-less as for Types
pprHsType, pprParendHsType :: (OutputableBndr name) => HsType name -> SDoc
-pprHsType ty = getPprStyle $ \sty -> ppr_mono_ty pREC_TOP (prepare sty ty)
-pprParendHsType ty = ppr_mono_ty pREC_CON ty
+pprHsType ty = getPprStyle $ \sty -> ppr_mono_ty TopPrec (prepare sty ty)
+pprParendHsType ty = ppr_mono_ty TyConPrec ty
-- Before printing a type
-- (a) Remove outermost HsParTy parens
@@ -615,15 +622,15 @@ prepare :: PprStyle -> HsType name -> HsType name
prepare sty (HsParTy ty) = prepare sty (unLoc ty)
prepare _ ty = ty
-ppr_mono_lty :: (OutputableBndr name) => Int -> LHsType name -> SDoc
+ppr_mono_lty :: (OutputableBndr name) => TyPrec -> LHsType name -> SDoc
ppr_mono_lty ctxt_prec ty = ppr_mono_ty ctxt_prec (unLoc ty)
-ppr_mono_ty :: (OutputableBndr name) => Int -> HsType name -> SDoc
+ppr_mono_ty :: (OutputableBndr name) => TyPrec -> HsType name -> SDoc
ppr_mono_ty ctxt_prec (HsForAllTy exp tvs ctxt ty)
- = maybeParen ctxt_prec pREC_FUN $
- sep [pprHsForAll exp tvs ctxt, ppr_mono_lty pREC_TOP ty]
+ = maybeParen ctxt_prec FunPrec $
+ sep [pprHsForAll exp tvs ctxt, ppr_mono_lty TopPrec ty]
-ppr_mono_ty _ (HsBangTy b ty) = ppr b <> ppr_mono_lty pREC_CON ty
+ppr_mono_ty _ (HsBangTy b ty) = ppr b <> ppr_mono_lty TyConPrec ty
ppr_mono_ty _ (HsQuasiQuoteTy qq) = ppr qq
ppr_mono_ty _ (HsRecTy flds) = pprConDeclFields flds
ppr_mono_ty _ (HsTyVar name) = pprPrefixOcc name
@@ -632,10 +639,10 @@ ppr_mono_ty _ (HsTupleTy con tys) = tupleParens std_con (interpp'SP tys)
where std_con = case con of
HsUnboxedTuple -> UnboxedTuple
_ -> BoxedTuple
-ppr_mono_ty _ (HsKindSig ty kind) = parens (ppr_mono_lty pREC_TOP ty <+> dcolon <+> ppr kind)
-ppr_mono_ty _ (HsListTy ty) = brackets (ppr_mono_lty pREC_TOP ty)
-ppr_mono_ty _ (HsPArrTy ty) = paBrackets (ppr_mono_lty pREC_TOP ty)
-ppr_mono_ty prec (HsIParamTy n ty) = maybeParen prec pREC_FUN (ppr n <+> dcolon <+> ppr_mono_lty pREC_TOP ty)
+ppr_mono_ty _ (HsKindSig ty kind) = parens (ppr_mono_lty TopPrec ty <+> dcolon <+> ppr kind)
+ppr_mono_ty _ (HsListTy ty) = brackets (ppr_mono_lty TopPrec ty)
+ppr_mono_ty _ (HsPArrTy ty) = paBrackets (ppr_mono_lty TopPrec ty)
+ppr_mono_ty prec (HsIParamTy n ty) = maybeParen prec FunPrec (ppr n <+> dcolon <+> ppr_mono_lty TopPrec ty)
ppr_mono_ty _ (HsSpliceTy s _) = pprUntypedSplice s
ppr_mono_ty _ (HsCoreTy ty) = ppr ty
ppr_mono_ty _ (HsExplicitListTy _ tys) = quote $ brackets (interpp'SP tys)
@@ -651,45 +658,45 @@ ppr_mono_ty ctxt_prec (HsWrapTy (WpKiApps _kis) ty)
where
go ctxt_prec [] ty = ppr_mono_ty ctxt_prec ty
go ctxt_prec (ki:kis) ty
- = maybeParen ctxt_prec pREC_CON $
- hsep [ go pREC_FUN kis ty
+ = maybeParen ctxt_prec TyConPrec $
+ hsep [ go FunPrec kis ty
, ptext (sLit "@") <> pprParendKind ki ]
-}
ppr_mono_ty ctxt_prec (HsEqTy ty1 ty2)
- = maybeParen ctxt_prec pREC_OP $
- ppr_mono_lty pREC_OP ty1 <+> char '~' <+> ppr_mono_lty pREC_OP ty2
+ = maybeParen ctxt_prec TyOpPrec $
+ ppr_mono_lty TyOpPrec ty1 <+> char '~' <+> ppr_mono_lty TyOpPrec ty2
ppr_mono_ty ctxt_prec (HsAppTy fun_ty arg_ty)
- = maybeParen ctxt_prec pREC_CON $
- hsep [ppr_mono_lty pREC_FUN fun_ty, ppr_mono_lty pREC_CON arg_ty]
+ = maybeParen ctxt_prec TyConPrec $
+ hsep [ppr_mono_lty FunPrec fun_ty, ppr_mono_lty TyConPrec arg_ty]
ppr_mono_ty ctxt_prec (HsOpTy ty1 (_wrapper, L _ op) ty2)
- = maybeParen ctxt_prec pREC_OP $
- sep [ ppr_mono_lty pREC_OP ty1
- , sep [pprInfixOcc op, ppr_mono_lty pREC_OP ty2 ] ]
+ = maybeParen ctxt_prec TyOpPrec $
+ sep [ ppr_mono_lty TyOpPrec ty1
+ , sep [pprInfixOcc op, ppr_mono_lty TyOpPrec ty2 ] ]
-- Don't print the wrapper (= kind applications)
-- c.f. HsWrapTy
ppr_mono_ty _ (HsParTy ty)
- = parens (ppr_mono_lty pREC_TOP ty)
+ = parens (ppr_mono_lty TopPrec ty)
-- Put the parens in where the user did
-- But we still use the precedence stuff to add parens because
-- toHsType doesn't put in any HsParTys, so we may still need them
ppr_mono_ty ctxt_prec (HsDocTy ty doc)
- = maybeParen ctxt_prec pREC_OP $
- ppr_mono_lty pREC_OP ty <+> ppr (unLoc doc)
+ = maybeParen ctxt_prec TyOpPrec $
+ ppr_mono_lty TyOpPrec ty <+> ppr (unLoc doc)
-- we pretty print Haddock comments on types as if they were
-- postfix operators
--------------------------
-ppr_fun_ty :: (OutputableBndr name) => Int -> LHsType name -> LHsType name -> SDoc
+ppr_fun_ty :: (OutputableBndr name) => TyPrec -> LHsType name -> LHsType name -> SDoc
ppr_fun_ty ctxt_prec ty1 ty2
- = let p1 = ppr_mono_lty pREC_FUN ty1
- p2 = ppr_mono_lty pREC_TOP ty2
+ = let p1 = ppr_mono_lty FunPrec ty1
+ p2 = ppr_mono_lty TopPrec ty2
in
- maybeParen ctxt_prec pREC_FUN $
+ maybeParen ctxt_prec FunPrec $
sep [p1, ptext (sLit "->") <+> p2]
--------------------------
diff --git a/compiler/hsSyn/HsUtils.lhs b/compiler/hsSyn/HsUtils.lhs
index 558c104fadd3..5d4d22fae2c1 100644
--- a/compiler/hsSyn/HsUtils.lhs
+++ b/compiler/hsSyn/HsUtils.lhs
@@ -1,10 +1,12 @@
+> {-# LANGUAGE ScopedTypeVariables #-}
+
%
% (c) The University of Glasgow, 1992-2006
%
Here we collect a variety of helper functions that construct or
analyse HsSyn. All these functions deal with generic HsSyn; functions
-which deal with the intantiated versions are located elsewhere:
+which deal with the instantiated versions are located elsewhere:
Parameterised by Module
---------------- -------------
@@ -13,7 +15,8 @@ which deal with the intantiated versions are located elsewhere:
Id typecheck/TcHsSyn
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -99,7 +102,10 @@ import FastString
import Util
import Bag
import Outputable
+
import Data.Either
+import Data.Function
+import Data.List
\end{code}
@@ -132,8 +138,8 @@ unguardedGRHSs rhs = GRHSs (unguardedRHS rhs) emptyLocalBinds
unguardedRHS :: Located (body id) -> [LGRHS id (Located (body id))]
unguardedRHS rhs@(L loc _) = [L loc (GRHS [] rhs)]
-mkMatchGroup :: [LMatch id (Located (body id))] -> MatchGroup id (Located (body id))
-mkMatchGroup matches = MG { mg_alts = matches, mg_arg_tys = [], mg_res_ty = placeHolderType }
+mkMatchGroup :: Origin -> [LMatch id (Located (body id))] -> MatchGroup id (Located (body id))
+mkMatchGroup origin matches = MG { mg_alts = matches, mg_arg_tys = [], mg_res_ty = placeHolderType, mg_origin = origin }
mkHsAppTy :: LHsType name -> LHsType name -> LHsType name
mkHsAppTy t1 t2 = addCLoc t1 t2 (HsAppTy t1 t2)
@@ -144,7 +150,7 @@ mkHsApp e1 e2 = addCLoc e1 e2 (HsApp e1 e2)
mkHsLam :: [LPat id] -> LHsExpr id -> LHsExpr id
mkHsLam pats body = mkHsPar (L (getLoc body) (HsLam matches))
where
- matches = mkMatchGroup [mkSimpleMatch pats body]
+ matches = mkMatchGroup Generated [mkSimpleMatch pats body]
mkHsLams :: [TyVar] -> [EvVar] -> LHsExpr Id -> LHsExpr Id
mkHsLams tyvars dicts expr = mkLHsWrap (mkWpTyLams tyvars <.> mkWpLams dicts) expr
@@ -351,11 +357,11 @@ nlHsIf :: LHsExpr id -> LHsExpr id -> LHsExpr id -> LHsExpr id
nlHsCase :: LHsExpr id -> [LMatch id (LHsExpr id)] -> LHsExpr id
nlList :: [LHsExpr id] -> LHsExpr id
-nlHsLam match = noLoc (HsLam (mkMatchGroup [match]))
-nlHsPar e = noLoc (HsPar e)
-nlHsIf cond true false = noLoc (mkHsIf cond true false)
-nlHsCase expr matches = noLoc (HsCase expr (mkMatchGroup matches))
-nlList exprs = noLoc (ExplicitList placeHolderType Nothing exprs)
+nlHsLam match = noLoc (HsLam (mkMatchGroup Generated [match]))
+nlHsPar e = noLoc (HsPar e)
+nlHsIf cond true false = noLoc (mkHsIf cond true false)
+nlHsCase expr matches = noLoc (HsCase expr (mkMatchGroup Generated matches))
+nlList exprs = noLoc (ExplicitList placeHolderType Nothing exprs)
nlHsAppTy :: LHsType name -> LHsType name -> LHsType name
nlHsTyVar :: name -> LHsType name
@@ -382,7 +388,7 @@ mkLHsVarTuple :: [a] -> LHsExpr a
mkLHsVarTuple ids = mkLHsTupleExpr (map nlHsVar ids)
nlTuplePat :: [LPat id] -> Boxity -> LPat id
-nlTuplePat pats box = noLoc (TuplePat pats box placeHolderType)
+nlTuplePat pats box = noLoc (TuplePat pats box [])
missingTupArg :: HsTupArg a
missingTupArg = Missing placeHolderType
@@ -478,20 +484,20 @@ l
mkFunBind :: Located RdrName -> [LMatch RdrName (LHsExpr RdrName)] -> HsBind RdrName
-- Not infix, with place holders for coercion and free vars
mkFunBind fn ms = FunBind { fun_id = fn, fun_infix = False
- , fun_matches = mkMatchGroup ms
- , fun_co_fn = idHsWrapper
+ , fun_matches = mkMatchGroup Generated ms
+ , fun_co_fn = idHsWrapper
, bind_fvs = placeHolderNames
- , fun_tick = Nothing }
+ , fun_tick = Nothing }
-mkTopFunBind :: Located Name -> [LMatch Name (LHsExpr Name)] -> HsBind Name
+mkTopFunBind :: Origin -> Located Name -> [LMatch Name (LHsExpr Name)] -> HsBind Name
-- In Name-land, with empty bind_fvs
-mkTopFunBind fn ms = FunBind { fun_id = fn, fun_infix = False
- , fun_matches = mkMatchGroup ms
- , fun_co_fn = idHsWrapper
- , bind_fvs = emptyNameSet -- NB: closed binding
- , fun_tick = Nothing }
+mkTopFunBind origin fn ms = FunBind { fun_id = fn, fun_infix = False
+ , fun_matches = mkMatchGroup origin ms
+ , fun_co_fn = idHsWrapper
+ , bind_fvs = emptyNameSet -- NB: closed binding
+ , fun_tick = Nothing }
-mkHsVarBind :: SrcSpan -> RdrName -> LHsExpr RdrName -> (Origin, LHsBind RdrName)
+mkHsVarBind :: SrcSpan -> RdrName -> LHsExpr RdrName -> LHsBind RdrName
mkHsVarBind loc var rhs = mk_easy_FunBind loc var [] rhs
mkVarBind :: id -> LHsExpr id -> LHsBind id
@@ -499,17 +505,19 @@ mkVarBind var rhs = L (getLoc rhs) $
VarBind { var_id = var, var_rhs = rhs, var_inline = False }
mkPatSynBind :: Located RdrName -> HsPatSynDetails (Located RdrName) -> LPat RdrName -> HsPatSynDir RdrName -> HsBind RdrName
-mkPatSynBind name details lpat dir = PatSynBind{ patsyn_id = name
- , patsyn_args = details
- , patsyn_def = lpat
- , patsyn_dir = dir
- , bind_fvs = placeHolderNames }
+mkPatSynBind name details lpat dir = PatSynBind psb
+ where
+ psb = PSB{ psb_id = name
+ , psb_args = details
+ , psb_def = lpat
+ , psb_dir = dir
+ , psb_fvs = placeHolderNames }
------------
mk_easy_FunBind :: SrcSpan -> RdrName -> [LPat RdrName]
- -> LHsExpr RdrName -> (Origin, LHsBind RdrName)
+ -> LHsExpr RdrName -> LHsBind RdrName
mk_easy_FunBind loc fun pats expr
- = (Generated, L loc $ mkFunBind (L loc fun) [mkMatch pats expr emptyLocalBinds])
+ = L loc $ mkFunBind (L loc fun) [mkMatch pats expr emptyLocalBinds]
------------
mkMatch :: [LPat id] -> LHsExpr id -> HsLocalBinds id -> LMatch id (LHsExpr id)
@@ -571,7 +579,7 @@ collect_bind (AbsBinds { abs_exports = dbinds, abs_binds = _binds }) acc
-- I don't think we want the binders from the nested binds
-- The only time we collect binders from a typechecked
-- binding (hence see AbsBinds) is in zonking in TcHsSyn
-collect_bind (PatSynBind { patsyn_id = L _ ps }) acc = ps : acc
+collect_bind (PatSynBind (PSB { psb_id = L _ ps })) acc = ps : acc
collectHsBindsBinders :: LHsBindsLR idL idR -> [idL]
collectHsBindsBinders binds = collect_binds binds []
@@ -580,11 +588,11 @@ collectHsBindListBinders :: [LHsBindLR idL idR] -> [idL]
collectHsBindListBinders = foldr (collect_bind . unLoc) []
collect_binds :: LHsBindsLR idL idR -> [idL] -> [idL]
-collect_binds binds acc = foldrBag (collect_bind . unLoc . snd) acc binds
+collect_binds binds acc = foldrBag (collect_bind . unLoc) acc binds
collectMethodBinders :: LHsBindsLR RdrName idR -> [Located RdrName]
-- Used exclusively for the bindings of an instance decl which are all FunBinds
-collectMethodBinders binds = foldrBag (get . unLoc . snd) [] binds
+collectMethodBinders binds = foldrBag (get . unLoc) [] binds
where
get (FunBind { fun_id = f }) fs = f : fs
get _ fs = fs
@@ -742,24 +750,26 @@ hsDataDefnBinders (HsDataDefn { dd_cons = cons }) = hsConDeclsBinders cons
-- See Note [Binders in family instances]
-------------------
-hsConDeclsBinders :: (Eq name) => [LConDecl name] -> [Located name]
+hsConDeclsBinders :: forall name. (Eq name) => [LConDecl name] -> [Located name]
-- See hsLTyClDeclBinders for what this does
-- The function is boringly complicated because of the records
-- And since we only have equality, we have to be a little careful
-hsConDeclsBinders cons
- = snd (foldl do_one ([], []) cons)
- where
- do_one (flds_seen, acc) (L loc (ConDecl { con_name = L _ name
- , con_details = RecCon flds }))
- = (map unLoc new_flds ++ flds_seen, L loc name : new_flds ++ acc)
- where
+hsConDeclsBinders cons = go id cons
+ where go :: ([Located name] -> [Located name]) -> [LConDecl name] -> [Located name]
+ go _ [] = []
+ go remSeen (r:rs) =
-- don't re-mangle the location of field names, because we don't
-- have a record of the full location of the field declaration anyway
- new_flds = filterOut (\f -> unLoc f `elem` flds_seen)
- (map cd_fld_name flds)
+ case r of
+ -- remove only the first occurrence of any seen field in order to
+ -- avoid circumventing detection of duplicate fields (#9156)
+ L loc (ConDecl { con_name = L _ name , con_details = RecCon flds }) ->
+ (L loc name) : r' ++ go remSeen' rs
+ where r' = remSeen (map cd_fld_name flds)
+ remSeen' = foldr (.) remSeen [deleteBy ((==) `on` unLoc) v | v <- r']
+ L loc (ConDecl { con_name = L _ name }) ->
+ (L loc name) : go remSeen rs
- do_one (flds_seen, acc) (L loc (ConDecl { con_name = L _ name }))
- = (flds_seen, L loc name : acc)
\end{code}
Note [Binders in family instances]
@@ -808,7 +818,7 @@ hsValBindsImplicits (ValBindsIn binds _)
= lhsBindsImplicits binds
lhsBindsImplicits :: LHsBindsLR Name idR -> NameSet
-lhsBindsImplicits = foldBag unionNameSets (lhs_bind . unLoc . snd) emptyNameSet
+lhsBindsImplicits = foldBag unionNameSets (lhs_bind . unLoc) emptyNameSet
where
lhs_bind (PatBind { pat_lhs = lpat }) = lPatImplicits lpat
lhs_bind _ = emptyNameSet
diff --git a/compiler/iface/BinIface.hs b/compiler/iface/BinIface.hs
index 9fd0c33423d4..4ec9ec7cbbbe 100644
--- a/compiler/iface/BinIface.hs
+++ b/compiler/iface/BinIface.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
--
-- (c) The University of Glasgow 2002-2006
--
@@ -258,7 +260,7 @@ getSymbolTable bh ncu = do
mapAccumR (fromOnDiskName arr) namecache od_names
in (namecache', arr)
-type OnDiskName = (PackageId, ModuleName, OccName)
+type OnDiskName = (PackageKey, ModuleName, OccName)
fromOnDiskName :: Array Int Name -> NameCache -> OnDiskName -> (NameCache, Name)
fromOnDiskName _ nc (pid, mod_name, occ) =
@@ -275,7 +277,7 @@ fromOnDiskName _ nc (pid, mod_name, occ) =
serialiseName :: BinHandle -> Name -> UniqFM (Int,Name) -> IO ()
serialiseName bh name _ = do
let mod = ASSERT2( isExternalName name, ppr name ) nameModule name
- put_ bh (modulePackageId mod, moduleName mod, nameOccName name)
+ put_ bh (modulePackageKey mod, moduleName mod, nameOccName name)
-- Note [Symbol table representation of names]
diff --git a/compiler/iface/BuildTyCl.lhs b/compiler/iface/BuildTyCl.lhs
index e412d7ef301c..46091adf80e4 100644
--- a/compiler/iface/BuildTyCl.lhs
+++ b/compiler/iface/BuildTyCl.lhs
@@ -4,7 +4,8 @@
%
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -15,7 +16,7 @@ module BuildTyCl (
buildSynTyCon,
buildAlgTyCon,
buildDataCon,
- buildPatSyn, mkPatSynMatcherId, mkPatSynWrapperId,
+ buildPatSyn,
TcMethInfo, buildClass,
distinctAbstractTyConRhs, totallyAbstractTyConRhs,
mkNewTyConRhs, mkDataTyConRhs,
@@ -36,10 +37,9 @@ import MkId
import Class
import TyCon
import Type
-import TypeRep
-import TcType
import Id
import Coercion
+import TcType
import DynFlags
import TcRnMonad
@@ -184,67 +184,34 @@ mkDataConStupidTheta tycon arg_tys univ_tvs
------------------------------------------------------
-buildPatSyn :: Name -> Bool -> Bool
- -> [Var]
+buildPatSyn :: Name -> Bool
+ -> Id -> Maybe Id
+ -> [Type]
-> [TyVar] -> [TyVar] -- Univ and ext
-> ThetaType -> ThetaType -- Prov and req
-> Type -- Result type
- -> TyVar
- -> TcRnIf m n PatSyn
-buildPatSyn src_name declared_infix has_wrapper args univ_tvs ex_tvs prov_theta req_theta pat_ty tv
- = do { (matcher, _, _) <- mkPatSynMatcherId src_name args
- univ_tvs ex_tvs
- prov_theta req_theta
- pat_ty tv
- ; wrapper <- case has_wrapper of
- False -> return Nothing
- True -> fmap Just $
- mkPatSynWrapperId src_name args
- (univ_tvs ++ ex_tvs) (prov_theta ++ req_theta)
- pat_ty
- ; return $ mkPatSyn src_name declared_infix
- args
- univ_tvs ex_tvs
- prov_theta req_theta
- pat_ty
- matcher
- wrapper }
-
-mkPatSynMatcherId :: Name
- -> [Var]
- -> [TyVar]
- -> [TyVar]
- -> ThetaType -> ThetaType
- -> Type
- -> TyVar
- -> TcRnIf n m (Id, Type, Type)
-mkPatSynMatcherId name args univ_tvs ex_tvs prov_theta req_theta pat_ty res_tv
- = do { matcher_name <- newImplicitBinder name mkMatcherOcc
-
- ; let res_ty = TyVarTy res_tv
- cont_ty = mkSigmaTy ex_tvs prov_theta $
- mkFunTys (map varType args) res_ty
-
- ; let matcher_tau = mkFunTys [pat_ty, cont_ty, res_ty] res_ty
- matcher_sigma = mkSigmaTy (res_tv:univ_tvs) req_theta matcher_tau
- matcher_id = mkVanillaGlobal matcher_name matcher_sigma
- ; return (matcher_id, res_ty, cont_ty) }
-
-mkPatSynWrapperId :: Name
- -> [Var]
- -> [TyVar]
- -> ThetaType
- -> Type
- -> TcRnIf n m Id
-mkPatSynWrapperId name args qtvs theta pat_ty
- = do { wrapper_name <- newImplicitBinder name mkDataConWrapperOcc
-
- ; let wrapper_tau = mkFunTys (map varType args) pat_ty
- wrapper_sigma = mkSigmaTy qtvs theta wrapper_tau
-
- ; let wrapper_id = mkVanillaGlobal wrapper_name wrapper_sigma
- ; return wrapper_id }
-
+ -> PatSyn
+buildPatSyn src_name declared_infix matcher wrapper
+ args univ_tvs ex_tvs prov_theta req_theta pat_ty
+ = ASSERT((and [ univ_tvs == univ_tvs'
+ , ex_tvs == ex_tvs'
+ , pat_ty `eqType` pat_ty'
+ , prov_theta `eqTypes` prov_theta'
+ , req_theta `eqTypes` req_theta'
+ , args `eqTypes` args'
+ ]))
+ mkPatSyn src_name declared_infix
+ args
+ univ_tvs ex_tvs
+ prov_theta req_theta
+ pat_ty
+ matcher
+ wrapper
+ where
+ ((_:univ_tvs'), req_theta', tau) = tcSplitSigmaTy $ idType matcher
+ ([pat_ty', cont_sigma, _], _) = tcSplitFunTys tau
+ (ex_tvs', prov_theta', cont_tau) = tcSplitSigmaTy cont_sigma
+ (args', _) = tcSplitFunTys cont_tau
\end{code}
@@ -254,10 +221,7 @@ type TcMethInfo = (Name, DefMethSpec, Type)
-- A temporary intermediate, to communicate between
-- tcClassSigs and buildClass.
-buildClass :: Bool -- True <=> do not include unfoldings
- -- on dict selectors
- -- Used when importing a class without -O
- -> Name -> [TyVar] -> [Role] -> ThetaType
+buildClass :: Name -> [TyVar] -> [Role] -> ThetaType
-> [FunDep TyVar] -- Functional dependencies
-> [ClassATItem] -- Associated types
-> [TcMethInfo] -- Method info
@@ -265,10 +229,9 @@ buildClass :: Bool -- True <=> do not include unfoldings
-> RecFlag -- Info for type constructor
-> TcRnIf m n Class
-buildClass no_unf tycon_name tvs roles sc_theta fds at_items sig_stuff mindef tc_isrec
+buildClass tycon_name tvs roles sc_theta fds at_items sig_stuff mindef tc_isrec
= fixM $ \ rec_clas -> -- Only name generation inside loop
do { traceIf (text "buildClass")
- ; dflags <- getDynFlags
; datacon_name <- newImplicitBinder tycon_name mkClassDataConOcc
-- The class name is the 'parent' for this datacon, not its tycon,
@@ -282,7 +245,7 @@ buildClass no_unf tycon_name tvs roles sc_theta fds at_items sig_stuff mindef tc
-- Make selectors for the superclasses
; sc_sel_names <- mapM (newImplicitBinder tycon_name . mkSuperDictSelOcc)
[1..length sc_theta]
- ; let sc_sel_ids = [ mkDictSelId dflags no_unf sc_name rec_clas
+ ; let sc_sel_ids = [ mkDictSelId sc_name rec_clas
| sc_name <- sc_sel_names]
-- We number off the Dict superclass selectors, 1, 2, 3 etc so that we
-- can construct names for the selectors. Thus
@@ -348,14 +311,13 @@ buildClass no_unf tycon_name tvs roles sc_theta fds at_items sig_stuff mindef tc
where
mk_op_item :: Class -> TcMethInfo -> TcRnIf n m ClassOpItem
mk_op_item rec_clas (op_name, dm_spec, _)
- = do { dflags <- getDynFlags
- ; dm_info <- case dm_spec of
+ = do { dm_info <- case dm_spec of
NoDM -> return NoDefMeth
GenericDM -> do { dm_name <- newImplicitBinder op_name mkGenDefMethodOcc
; return (GenDefMeth dm_name) }
VanillaDM -> do { dm_name <- newImplicitBinder op_name mkDefaultMethodOcc
; return (DefMeth dm_name) }
- ; return (mkDictSelId dflags no_unf op_name rec_clas, dm_info) }
+ ; return (mkDictSelId op_name rec_clas, dm_info) }
\end{code}
Note [Class newtypes and equality predicates]
@@ -368,7 +330,7 @@ We cannot represent this by a newtype, even though it's not
existential, because there are two value fields (the equality
predicate and op. See Trac #2238
-Moreover,
+Moreover,
class (a ~ F b) => C a b where {}
Here we can't use a newtype either, even though there is only
one field, because equality predicates are unboxed, and classes
diff --git a/compiler/iface/IfaceEnv.lhs b/compiler/iface/IfaceEnv.lhs
index 42c3e326050a..c29778dc2380 100644
--- a/compiler/iface/IfaceEnv.lhs
+++ b/compiler/iface/IfaceEnv.lhs
@@ -1,7 +1,8 @@
(c) The University of Glasgow 2002-2006
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, RankNTypes #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -28,13 +29,10 @@ module IfaceEnv (
import TcRnMonad
import TysWiredIn
import HscTypes
-import TyCon
import Type
-import DataCon
import Var
import Name
import Avail
-import PrelNames
import Module
import UniqFM
import FastString
@@ -183,23 +181,34 @@ lookupOrig mod occ
See Note [The Name Cache] above.
+Note [Built-in syntax and the OrigNameCache]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+You might think that using isBuiltInOcc_maybe in lookupOrigNameCache is
+unnecessary because tuple TyCon/DataCons are parsed as Exact RdrNames
+and *don't* appear as original names in interface files (because
+serialization gives them special treatment), so we will never look
+them up in the original name cache.
+
+However, there are two reasons why we might look up an Orig RdrName:
+
+ * If you use setRdrNameSpace on an Exact RdrName it may be
+ turned into an Orig RdrName.
+
+ * Template Haskell turns a BuiltInSyntax Name into a TH.NameG
+ (DsMeta.globalVar), and parses a NameG into an Orig RdrName
+ (Convert.thRdrName). So, eg $(do { reify '(,); ... }) will
+ go this route (Trac #8954).
+
\begin{code}
lookupOrigNameCache :: OrigNameCache -> Module -> OccName -> Maybe Name
-lookupOrigNameCache _ mod occ
- -- Don't need to mention gHC_UNIT here because it is explicitly
- -- included in TysWiredIn.wiredInTyCons
- | mod == gHC_TUPLE || mod == gHC_PRIM, -- Boxed tuples from one,
- Just tup_info <- isTupleOcc_maybe occ -- unboxed from the other
- = -- Special case for tuples; there are too many
+lookupOrigNameCache nc mod occ
+ | Just name <- isBuiltInOcc_maybe occ
+ = -- See Note [Known-key names], 3(c) in PrelNames
+ -- Special case for tuples; there are too many
-- of them to pre-populate the original-name cache
- Just (mk_tup_name tup_info)
- where
- mk_tup_name (ns, sort, arity)
- | ns == tcName = tyConName (tupleTyCon sort arity)
- | ns == dataName = dataConName (tupleCon sort arity)
- | otherwise = Var.varName (dataConWorkId (tupleCon sort arity))
+ Just name
-lookupOrigNameCache nc mod occ -- The normal case
+ | otherwise
= case lookupModuleEnv nc mod of
Nothing -> Nothing
Just occ_env -> lookupOccEnv occ_env occ
diff --git a/compiler/iface/IfaceSyn.lhs b/compiler/iface/IfaceSyn.lhs
index b58230543466..9496a9f087ff 100644
--- a/compiler/iface/IfaceSyn.lhs
+++ b/compiler/iface/IfaceSyn.lhs
@@ -4,7 +4,8 @@
%
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -15,13 +16,14 @@ module IfaceSyn (
module IfaceType,
IfaceDecl(..), IfaceSynTyConRhs(..), IfaceClassOp(..), IfaceAT(..),
- IfaceConDecl(..), IfaceConDecls(..),
+ IfaceConDecl(..), IfaceConDecls(..), IfaceEqSpec,
IfaceExpr(..), IfaceAlt, IfaceLetBndr(..),
IfaceBinding(..), IfaceConAlt(..),
IfaceIdInfo(..), IfaceIdDetails(..), IfaceUnfolding(..),
IfaceInfoItem(..), IfaceRule(..), IfaceAnnotation(..), IfaceAnnTarget,
IfaceClsInst(..), IfaceFamInst(..), IfaceTickish(..),
IfaceBang(..), IfaceAxBranch(..),
+ IfaceTyConParent(..),
-- Misc
ifaceDeclImplicitBndrs, visibleIfConDecls,
@@ -31,7 +33,9 @@ module IfaceSyn (
freeNamesIfDecl, freeNamesIfRule, freeNamesIfFamInst,
-- Pretty printing
- pprIfaceExpr
+ pprIfaceExpr,
+ pprIfaceDecl,
+ ShowSub(..), ShowHowMuch(..)
) where
#include "HsVersions.h"
@@ -51,13 +55,18 @@ import BasicTypes
import Outputable
import FastString
import Module
-import TysWiredIn ( eqTyConName )
+import SrcLoc
import Fingerprint
import Binary
import BooleanFormula ( BooleanFormula )
+import HsBinds
+import TyCon (Role (..))
+import StaticFlags (opt_PprStyle_Debug)
+import Util( filterOut )
import Control.Monad
import System.IO.Unsafe
+import Data.Maybe (isJust)
infixl 3 &&&
\end{code}
@@ -65,18 +74,27 @@ infixl 3 &&&
%************************************************************************
%* *
- Data type declarations
+ Declarations
%* *
%************************************************************************
\begin{code}
+type IfaceTopBndr = OccName
+ -- It's convenient to have an OccName in the IfaceSyn, although in each
+ -- case the namespace is implied by the context. However, having an
+ -- OccName makes things like ifaceDeclImplicitBndrs and ifaceDeclFingerprints
+ -- very convenient.
+ --
+ -- We don't serialise the namespace onto the disk though; rather we
+ -- drop it when serialising and add it back in when deserialising.
+
data IfaceDecl
- = IfaceId { ifName :: OccName,
+ = IfaceId { ifName :: IfaceTopBndr,
ifType :: IfaceType,
ifIdDetails :: IfaceIdDetails,
ifIdInfo :: IfaceIdInfo }
- | IfaceData { ifName :: OccName, -- Type constructor
+ | IfaceData { ifName :: IfaceTopBndr, -- Type constructor
ifCType :: Maybe CType, -- C type for CAPI FFI
ifTyVars :: [IfaceTvBndr], -- Type variables
ifRoles :: [Role], -- Roles
@@ -86,355 +104,115 @@ data IfaceDecl
ifPromotable :: Bool, -- Promotable to kind level?
ifGadtSyntax :: Bool, -- True <=> declared using
-- GADT syntax
- ifAxiom :: Maybe IfExtName -- The axiom, for a newtype,
- -- or data/newtype family instance
+ ifParent :: IfaceTyConParent -- The axiom, for a newtype,
+ -- or data/newtype family instance
}
- | IfaceSyn { ifName :: OccName, -- Type constructor
+ | IfaceSyn { ifName :: IfaceTopBndr, -- Type constructor
ifTyVars :: [IfaceTvBndr], -- Type variables
ifRoles :: [Role], -- Roles
ifSynKind :: IfaceKind, -- Kind of the *rhs* (not of the tycon)
ifSynRhs :: IfaceSynTyConRhs }
- | IfaceClass { ifCtxt :: IfaceContext, -- Context...
- ifName :: OccName, -- Name of the class TyCon
- ifTyVars :: [IfaceTvBndr], -- Type variables
- ifRoles :: [Role], -- Roles
- ifFDs :: [FunDep FastString], -- Functional dependencies
- ifATs :: [IfaceAT], -- Associated type families
- ifSigs :: [IfaceClassOp], -- Method signatures
- ifMinDef :: BooleanFormula OccName, -- Minimal complete definition
- ifRec :: RecFlag -- Is newtype/datatype associated
- -- with the class recursive?
+ | IfaceClass { ifCtxt :: IfaceContext, -- Context...
+ ifName :: IfaceTopBndr, -- Name of the class TyCon
+ ifTyVars :: [IfaceTvBndr], -- Type variables
+ ifRoles :: [Role], -- Roles
+ ifFDs :: [FunDep FastString], -- Functional dependencies
+ ifATs :: [IfaceAT], -- Associated type families
+ ifSigs :: [IfaceClassOp], -- Method signatures
+ ifMinDef :: BooleanFormula IfLclName, -- Minimal complete definition
+ ifRec :: RecFlag -- Is newtype/datatype associated
+ -- with the class recursive?
}
- | IfaceAxiom { ifName :: OccName, -- Axiom name
+ | IfaceAxiom { ifName :: IfaceTopBndr, -- Axiom name
ifTyCon :: IfaceTyCon, -- LHS TyCon
ifRole :: Role, -- Role of axiom
ifAxBranches :: [IfaceAxBranch] -- Branches
}
- | IfaceForeign { ifName :: OccName, -- Needs expanding when we move
+ | IfaceForeign { ifName :: IfaceTopBndr, -- Needs expanding when we move
-- beyond .NET
ifExtName :: Maybe FastString }
- | IfacePatSyn { ifName :: OccName, -- Name of the pattern synonym
- ifPatHasWrapper :: Bool,
+ | IfacePatSyn { ifName :: IfaceTopBndr, -- Name of the pattern synonym
ifPatIsInfix :: Bool,
+ ifPatMatcher :: IfExtName,
+ ifPatWrapper :: Maybe IfExtName,
+ -- Everything below is redundant,
+ -- but needed to implement pprIfaceDecl
ifPatUnivTvs :: [IfaceTvBndr],
ifPatExTvs :: [IfaceTvBndr],
ifPatProvCtxt :: IfaceContext,
ifPatReqCtxt :: IfaceContext,
- ifPatArgs :: [IfaceIdBndr],
+ ifPatArgs :: [IfaceType],
ifPatTy :: IfaceType }
--- A bit of magic going on here: there's no need to store the OccName
--- for a decl on the disk, since we can infer the namespace from the
--- context; however it is useful to have the OccName in the IfaceDecl
--- to avoid re-building it in various places. So we build the OccName
--- when de-serialising.
-
-instance Binary IfaceDecl where
- put_ bh (IfaceId name ty details idinfo) = do
- putByte bh 0
- put_ bh (occNameFS name)
- put_ bh ty
- put_ bh details
- put_ bh idinfo
-
- put_ _ (IfaceForeign _ _) =
- error "Binary.put_(IfaceDecl): IfaceForeign"
-
- put_ bh (IfaceData a1 a2 a3 a4 a5 a6 a7 a8 a9 a10) = do
- putByte bh 2
- put_ bh (occNameFS a1)
- put_ bh a2
- put_ bh a3
- put_ bh a4
- put_ bh a5
- put_ bh a6
- put_ bh a7
- put_ bh a8
- put_ bh a9
- put_ bh a10
-
- put_ bh (IfaceSyn a1 a2 a3 a4 a5) = do
- putByte bh 3
- put_ bh (occNameFS a1)
- put_ bh a2
- put_ bh a3
- put_ bh a4
- put_ bh a5
-
- put_ bh (IfaceClass a1 a2 a3 a4 a5 a6 a7 a8 a9) = do
- putByte bh 4
- put_ bh a1
- put_ bh (occNameFS a2)
- put_ bh a3
- put_ bh a4
- put_ bh a5
- put_ bh a6
- put_ bh a7
- put_ bh a8
- put_ bh a9
-
- put_ bh (IfaceAxiom a1 a2 a3 a4) = do
- putByte bh 5
- put_ bh (occNameFS a1)
- put_ bh a2
- put_ bh a3
- put_ bh a4
-
- put_ bh (IfacePatSyn name a2 a3 a4 a5 a6 a7 a8 a9) = do
- putByte bh 6
- put_ bh (occNameFS name)
- put_ bh a2
- put_ bh a3
- put_ bh a4
- put_ bh a5
- put_ bh a6
- put_ bh a7
- put_ bh a8
- put_ bh a9
- get bh = do
- h <- getByte bh
- case h of
- 0 -> do name <- get bh
- ty <- get bh
- details <- get bh
- idinfo <- get bh
- occ <- return $! mkOccNameFS varName name
- return (IfaceId occ ty details idinfo)
- 1 -> error "Binary.get(TyClDecl): ForeignType"
- 2 -> do a1 <- get bh
- a2 <- get bh
- a3 <- get bh
- a4 <- get bh
- a5 <- get bh
- a6 <- get bh
- a7 <- get bh
- a8 <- get bh
- a9 <- get bh
- a10 <- get bh
- occ <- return $! mkOccNameFS tcName a1
- return (IfaceData occ a2 a3 a4 a5 a6 a7 a8 a9 a10)
- 3 -> do a1 <- get bh
- a2 <- get bh
- a3 <- get bh
- a4 <- get bh
- a5 <- get bh
- occ <- return $! mkOccNameFS tcName a1
- return (IfaceSyn occ a2 a3 a4 a5)
- 4 -> do a1 <- get bh
- a2 <- get bh
- a3 <- get bh
- a4 <- get bh
- a5 <- get bh
- a6 <- get bh
- a7 <- get bh
- a8 <- get bh
- a9 <- get bh
- occ <- return $! mkOccNameFS clsName a2
- return (IfaceClass a1 occ a3 a4 a5 a6 a7 a8 a9)
- 5 -> do a1 <- get bh
- a2 <- get bh
- a3 <- get bh
- a4 <- get bh
- occ <- return $! mkOccNameFS tcName a1
- return (IfaceAxiom occ a2 a3 a4)
- 6 -> do a1 <- get bh
- a2 <- get bh
- a3 <- get bh
- a4 <- get bh
- a5 <- get bh
- a6 <- get bh
- a7 <- get bh
- a8 <- get bh
- a9 <- get bh
- occ <- return $! mkOccNameFS dataName a1
- return (IfacePatSyn occ a2 a3 a4 a5 a6 a7 a8 a9)
- _ -> panic (unwords ["Unknown IfaceDecl tag:", show h])
+data IfaceTyConParent
+ = IfNoParent
+ | IfDataInstance IfExtName
+ IfaceTyCon
+ IfaceTcArgs
data IfaceSynTyConRhs
= IfaceOpenSynFamilyTyCon
- | IfaceClosedSynFamilyTyCon IfExtName -- name of associated axiom
+ | IfaceClosedSynFamilyTyCon IfExtName -- name of associated axiom
+ [IfaceAxBranch] -- for pretty printing purposes only
| IfaceAbstractClosedSynFamilyTyCon
| IfaceSynonymTyCon IfaceType
+ | IfaceBuiltInSynFamTyCon -- for pretty printing purposes only
-instance Binary IfaceSynTyConRhs where
- put_ bh IfaceOpenSynFamilyTyCon = putByte bh 0
- put_ bh (IfaceClosedSynFamilyTyCon ax) = putByte bh 1 >> put_ bh ax
- put_ bh IfaceAbstractClosedSynFamilyTyCon = putByte bh 2
- put_ bh (IfaceSynonymTyCon ty) = putByte bh 3 >> put_ bh ty
-
- get bh = do { h <- getByte bh
- ; case h of
- 0 -> return IfaceOpenSynFamilyTyCon
- 1 -> do { ax <- get bh
- ; return (IfaceClosedSynFamilyTyCon ax) }
- 2 -> return IfaceAbstractClosedSynFamilyTyCon
- _ -> do { ty <- get bh
- ; return (IfaceSynonymTyCon ty) } }
-
-data IfaceClassOp = IfaceClassOp OccName DefMethSpec IfaceType
+data IfaceClassOp = IfaceClassOp IfaceTopBndr DefMethSpec IfaceType
-- Nothing => no default method
-- Just False => ordinary polymorphic default method
-- Just True => generic default method
-instance Binary IfaceClassOp where
- put_ bh (IfaceClassOp n def ty) = do
- put_ bh (occNameFS n)
- put_ bh def
- put_ bh ty
- get bh = do
- n <- get bh
- def <- get bh
- ty <- get bh
- occ <- return $! mkOccNameFS varName n
- return (IfaceClassOp occ def ty)
-
-data IfaceAT = IfaceAT
- IfaceDecl -- The associated type declaration
- [IfaceAxBranch] -- Default associated type instances, if any
-
-instance Binary IfaceAT where
- put_ bh (IfaceAT dec defs) = do
- put_ bh dec
- put_ bh defs
- get bh = do
- dec <- get bh
- defs <- get bh
- return (IfaceAT dec defs)
-
-instance Outputable IfaceAxBranch where
- ppr = pprAxBranch Nothing
-
-pprAxBranch :: Maybe IfaceTyCon -> IfaceAxBranch -> SDoc
-pprAxBranch mtycon (IfaceAxBranch { ifaxbTyVars = tvs
- , ifaxbLHS = pat_tys
- , ifaxbRHS = ty
- , ifaxbIncomps = incomps })
- = ppr tvs <+> ppr_lhs <+> char '=' <+> ppr ty $+$
- nest 2 maybe_incomps
- where
- ppr_lhs
- | Just tycon <- mtycon
- = ppr (IfaceTyConApp tycon pat_tys)
- | otherwise
- = hsep (map ppr pat_tys)
-
- maybe_incomps
- | [] <- incomps
- = empty
+data IfaceAT = IfaceAT -- See Class.ClassATItem
+ IfaceDecl -- The associated type declaration
+ (Maybe IfaceType) -- Default associated type instance, if any
- | otherwise
- = parens (ptext (sLit "incompatible indices:") <+> ppr incomps)
--- this is just like CoAxBranch
+-- This is just like CoAxBranch
data IfaceAxBranch = IfaceAxBranch { ifaxbTyVars :: [IfaceTvBndr]
- , ifaxbLHS :: [IfaceType]
+ , ifaxbLHS :: IfaceTcArgs
, ifaxbRoles :: [Role]
, ifaxbRHS :: IfaceType
, ifaxbIncomps :: [BranchIndex] }
-- See Note [Storing compatibility] in CoAxiom
-instance Binary IfaceAxBranch where
- put_ bh (IfaceAxBranch a1 a2 a3 a4 a5) = do
- put_ bh a1
- put_ bh a2
- put_ bh a3
- put_ bh a4
- put_ bh a5
- get bh = do
- a1 <- get bh
- a2 <- get bh
- a3 <- get bh
- a4 <- get bh
- a5 <- get bh
- return (IfaceAxBranch a1 a2 a3 a4 a5)
-
data IfaceConDecls
= IfAbstractTyCon Bool -- c.f TyCon.AbstractTyCon
| IfDataFamTyCon -- Data family
| IfDataTyCon [IfaceConDecl] -- Data type decls
| IfNewTyCon IfaceConDecl -- Newtype decls
-instance Binary IfaceConDecls where
- put_ bh (IfAbstractTyCon d) = putByte bh 0 >> put_ bh d
- put_ bh IfDataFamTyCon = putByte bh 1
- put_ bh (IfDataTyCon cs) = putByte bh 2 >> put_ bh cs
- put_ bh (IfNewTyCon c) = putByte bh 3 >> put_ bh c
- get bh = do
- h <- getByte bh
- case h of
- 0 -> liftM IfAbstractTyCon $ get bh
- 1 -> return IfDataFamTyCon
- 2 -> liftM IfDataTyCon $ get bh
- _ -> liftM IfNewTyCon $ get bh
-
-visibleIfConDecls :: IfaceConDecls -> [IfaceConDecl]
-visibleIfConDecls (IfAbstractTyCon {}) = []
-visibleIfConDecls IfDataFamTyCon = []
-visibleIfConDecls (IfDataTyCon cs) = cs
-visibleIfConDecls (IfNewTyCon c) = [c]
-
data IfaceConDecl
= IfCon {
- ifConOcc :: OccName, -- Constructor name
+ ifConOcc :: IfaceTopBndr, -- Constructor name
ifConWrapper :: Bool, -- True <=> has a wrapper
ifConInfix :: Bool, -- True <=> declared infix
- ifConUnivTvs :: [IfaceTvBndr], -- Universal tyvars
+
+ -- The universal type variables are precisely those
+ -- of the type constructor of this data constructor
+ -- This is *easy* to guarantee when creating the IfCon
+ -- but it's not so easy for the original TyCon/DataCon
+ -- So this guarantee holds for IfaceConDecl, but *not* for DataCon
+
ifConExTvs :: [IfaceTvBndr], -- Existential tyvars
- ifConEqSpec :: [(OccName,IfaceType)], -- Equality constraints
+ ifConEqSpec :: IfaceEqSpec, -- Equality constraints
ifConCtxt :: IfaceContext, -- Non-stupid context
ifConArgTys :: [IfaceType], -- Arg types
- ifConFields :: [OccName], -- ...ditto... (field labels)
+ ifConFields :: [IfaceTopBndr], -- ...ditto... (field labels)
ifConStricts :: [IfaceBang]} -- Empty (meaning all lazy),
-- or 1-1 corresp with arg tys
-instance Binary IfaceConDecl where
- put_ bh (IfCon a1 a2 a3 a4 a5 a6 a7 a8 a9 a10) = do
- put_ bh a1
- put_ bh a2
- put_ bh a3
- put_ bh a4
- put_ bh a5
- put_ bh a6
- put_ bh a7
- put_ bh a8
- put_ bh a9
- put_ bh a10
- get bh = do
- a1 <- get bh
- a2 <- get bh
- a3 <- get bh
- a4 <- get bh
- a5 <- get bh
- a6 <- get bh
- a7 <- get bh
- a8 <- get bh
- a9 <- get bh
- a10 <- get bh
- return (IfCon a1 a2 a3 a4 a5 a6 a7 a8 a9 a10)
+type IfaceEqSpec = [(IfLclName,IfaceType)]
data IfaceBang
= IfNoBang | IfStrict | IfUnpack | IfUnpackCo IfaceCoercion
-instance Binary IfaceBang where
- put_ bh IfNoBang = putByte bh 0
- put_ bh IfStrict = putByte bh 1
- put_ bh IfUnpack = putByte bh 2
- put_ bh (IfUnpackCo co) = putByte bh 3 >> put_ bh co
-
- get bh = do
- h <- getByte bh
- case h of
- 0 -> do return IfNoBang
- 1 -> do return IfStrict
- 2 -> do return IfUnpack
- _ -> do { a <- get bh; return (IfUnpackCo a) }
-
data IfaceClsInst
= IfaceClsInst { ifInstCls :: IfExtName, -- See comments with
ifInstTys :: [Maybe IfaceTyCon], -- the defn of ClsInst
@@ -448,21 +226,6 @@ data IfaceClsInst
-- If this instance decl is *used*, we'll record a usage on the dfun;
-- and if the head does not change it won't be used if it wasn't before
-instance Binary IfaceClsInst where
- put_ bh (IfaceClsInst cls tys dfun flag orph) = do
- put_ bh cls
- put_ bh tys
- put_ bh dfun
- put_ bh flag
- put_ bh orph
- get bh = do
- cls <- get bh
- tys <- get bh
- dfun <- get bh
- flag <- get bh
- orph <- get bh
- return (IfaceClsInst cls tys dfun flag orph)
-
-- The ifFamInstTys field of IfaceFamInst contains a list of the rough
-- match types
data IfaceFamInst
@@ -472,19 +235,6 @@ data IfaceFamInst
, ifFamInstOrph :: Maybe OccName -- Just like IfaceClsInst
}
-instance Binary IfaceFamInst where
- put_ bh (IfaceFamInst fam tys name orph) = do
- put_ bh fam
- put_ bh tys
- put_ bh name
- put_ bh orph
- get bh = do
- fam <- get bh
- tys <- get bh
- name <- get bh
- orph <- get bh
- return (IfaceFamInst fam tys name orph)
-
data IfaceRule
= IfaceRule {
ifRuleName :: RuleName,
@@ -497,82 +247,14 @@ data IfaceRule
ifRuleOrph :: Maybe OccName -- Just like IfaceClsInst
}
-instance Binary IfaceRule where
- put_ bh (IfaceRule a1 a2 a3 a4 a5 a6 a7 a8) = do
- put_ bh a1
- put_ bh a2
- put_ bh a3
- put_ bh a4
- put_ bh a5
- put_ bh a6
- put_ bh a7
- put_ bh a8
- get bh = do
- a1 <- get bh
- a2 <- get bh
- a3 <- get bh
- a4 <- get bh
- a5 <- get bh
- a6 <- get bh
- a7 <- get bh
- a8 <- get bh
- return (IfaceRule a1 a2 a3 a4 a5 a6 a7 a8)
-
data IfaceAnnotation
= IfaceAnnotation {
ifAnnotatedTarget :: IfaceAnnTarget,
ifAnnotatedValue :: AnnPayload
}
-instance Outputable IfaceAnnotation where
- ppr (IfaceAnnotation target value) = ppr target <+> colon <+> ppr value
-
-instance Binary IfaceAnnotation where
- put_ bh (IfaceAnnotation a1 a2) = do
- put_ bh a1
- put_ bh a2
- get bh = do
- a1 <- get bh
- a2 <- get bh
- return (IfaceAnnotation a1 a2)
-
type IfaceAnnTarget = AnnTarget OccName
--- We only serialise the IdDetails of top-level Ids, and even then
--- we only need a very limited selection. Notably, none of the
--- implicit ones are needed here, because they are not put it
--- interface files
-
-data IfaceIdDetails
- = IfVanillaId
- | IfRecSelId IfaceTyCon Bool
- | IfDFunId Int -- Number of silent args
-
-instance Binary IfaceIdDetails where
- put_ bh IfVanillaId = putByte bh 0
- put_ bh (IfRecSelId a b) = putByte bh 1 >> put_ bh a >> put_ bh b
- put_ bh (IfDFunId n) = do { putByte bh 2; put_ bh n }
- get bh = do
- h <- getByte bh
- case h of
- 0 -> return IfVanillaId
- 1 -> do { a <- get bh; b <- get bh; return (IfRecSelId a b) }
- _ -> do { n <- get bh; return (IfDFunId n) }
-
-data IfaceIdInfo
- = NoInfo -- When writing interface file without -O
- | HasInfo [IfaceInfoItem] -- Has info, and here it is
-
-instance Binary IfaceIdInfo where
- put_ bh NoInfo = putByte bh 0
- put_ bh (HasInfo i) = putByte bh 1 >> lazyPut bh i -- NB lazyPut
-
- get bh = do
- h <- getByte bh
- case h of
- 0 -> return NoInfo
- _ -> liftM HasInfo $ lazyGet bh -- NB lazyGet
-
-- Here's a tricky case:
-- * Compile with -O module A, and B which imports A.f
-- * Change function f in A, and recompile without -O
@@ -583,6 +265,10 @@ instance Binary IfaceIdInfo where
-- * The version comparison sees that new (=NoInfo) differs from old (=HasInfo *)
-- and so gives a new version.
+data IfaceIdInfo
+ = NoInfo -- When writing interface file without -O
+ | HasInfo [IfaceInfoItem] -- Has info, and here it is
+
data IfaceInfoItem
= HsArity Arity
| HsStrictness StrictSig
@@ -591,23 +277,6 @@ data IfaceInfoItem
IfaceUnfolding -- See Note [Expose recursive functions]
| HsNoCafRefs
-instance Binary IfaceInfoItem where
- put_ bh (HsArity aa) = putByte bh 0 >> put_ bh aa
- put_ bh (HsStrictness ab) = putByte bh 1 >> put_ bh ab
- put_ bh (HsUnfold lb ad) = putByte bh 2 >> put_ bh lb >> put_ bh ad
- put_ bh (HsInline ad) = putByte bh 3 >> put_ bh ad
- put_ bh HsNoCafRefs = putByte bh 4
- get bh = do
- h <- getByte bh
- case h of
- 0 -> liftM HsArity $ get bh
- 1 -> liftM HsStrictness $ get bh
- 2 -> do lb <- get bh
- ad <- get bh
- return (HsUnfold lb ad)
- 3 -> liftM HsInline $ get bh
- _ -> return HsNoCafRefs
-
-- NB: Specialisations and rules come in separately and are
-- only later attached to the Id. Partial reason: some are orphans.
@@ -625,253 +294,19 @@ data IfaceUnfolding
| IfDFunUnfold [IfaceBndr] [IfaceExpr]
-instance Binary IfaceUnfolding where
- put_ bh (IfCoreUnfold s e) = do
- putByte bh 0
- put_ bh s
- put_ bh e
- put_ bh (IfInlineRule a b c d) = do
- putByte bh 1
- put_ bh a
- put_ bh b
- put_ bh c
- put_ bh d
- put_ bh (IfDFunUnfold as bs) = do
- putByte bh 2
- put_ bh as
- put_ bh bs
- put_ bh (IfCompulsory e) = do
- putByte bh 3
- put_ bh e
- get bh = do
- h <- getByte bh
- case h of
- 0 -> do s <- get bh
- e <- get bh
- return (IfCoreUnfold s e)
- 1 -> do a <- get bh
- b <- get bh
- c <- get bh
- d <- get bh
- return (IfInlineRule a b c d)
- 2 -> do as <- get bh
- bs <- get bh
- return (IfDFunUnfold as bs)
- _ -> do e <- get bh
- return (IfCompulsory e)
---------------------------------
-data IfaceExpr
- = IfaceLcl IfLclName
- | IfaceExt IfExtName
- | IfaceType IfaceType
- | IfaceCo IfaceCoercion
- | IfaceTuple TupleSort [IfaceExpr] -- Saturated; type arguments omitted
- | IfaceLam IfaceBndr IfaceExpr
- | IfaceApp IfaceExpr IfaceExpr
- | IfaceCase IfaceExpr IfLclName [IfaceAlt]
- | IfaceECase IfaceExpr IfaceType -- See Note [Empty case alternatives]
- | IfaceLet IfaceBinding IfaceExpr
- | IfaceCast IfaceExpr IfaceCoercion
- | IfaceLit Literal
- | IfaceFCall ForeignCall IfaceType
- | IfaceTick IfaceTickish IfaceExpr -- from Tick tickish E
+-- We only serialise the IdDetails of top-level Ids, and even then
+-- we only need a very limited selection. Notably, none of the
+-- implicit ones are needed here, because they are not put it
+-- interface files
-instance Binary IfaceExpr where
- put_ bh (IfaceLcl aa) = do
- putByte bh 0
- put_ bh aa
- put_ bh (IfaceType ab) = do
- putByte bh 1
- put_ bh ab
- put_ bh (IfaceCo ab) = do
- putByte bh 2
- put_ bh ab
- put_ bh (IfaceTuple ac ad) = do
- putByte bh 3
- put_ bh ac
- put_ bh ad
- put_ bh (IfaceLam ae af) = do
- putByte bh 4
- put_ bh ae
- put_ bh af
- put_ bh (IfaceApp ag ah) = do
- putByte bh 5
- put_ bh ag
- put_ bh ah
- put_ bh (IfaceCase ai aj ak) = do
- putByte bh 6
- put_ bh ai
- put_ bh aj
- put_ bh ak
- put_ bh (IfaceLet al am) = do
- putByte bh 7
- put_ bh al
- put_ bh am
- put_ bh (IfaceTick an ao) = do
- putByte bh 8
- put_ bh an
- put_ bh ao
- put_ bh (IfaceLit ap) = do
- putByte bh 9
- put_ bh ap
- put_ bh (IfaceFCall as at) = do
- putByte bh 10
- put_ bh as
- put_ bh at
- put_ bh (IfaceExt aa) = do
- putByte bh 11
- put_ bh aa
- put_ bh (IfaceCast ie ico) = do
- putByte bh 12
- put_ bh ie
- put_ bh ico
- put_ bh (IfaceECase a b) = do
- putByte bh 13
- put_ bh a
- put_ bh b
- get bh = do
- h <- getByte bh
- case h of
- 0 -> do aa <- get bh
- return (IfaceLcl aa)
- 1 -> do ab <- get bh
- return (IfaceType ab)
- 2 -> do ab <- get bh
- return (IfaceCo ab)
- 3 -> do ac <- get bh
- ad <- get bh
- return (IfaceTuple ac ad)
- 4 -> do ae <- get bh
- af <- get bh
- return (IfaceLam ae af)
- 5 -> do ag <- get bh
- ah <- get bh
- return (IfaceApp ag ah)
- 6 -> do ai <- get bh
- aj <- get bh
- ak <- get bh
- return (IfaceCase ai aj ak)
- 7 -> do al <- get bh
- am <- get bh
- return (IfaceLet al am)
- 8 -> do an <- get bh
- ao <- get bh
- return (IfaceTick an ao)
- 9 -> do ap <- get bh
- return (IfaceLit ap)
- 10 -> do as <- get bh
- at <- get bh
- return (IfaceFCall as at)
- 11 -> do aa <- get bh
- return (IfaceExt aa)
- 12 -> do ie <- get bh
- ico <- get bh
- return (IfaceCast ie ico)
- 13 -> do a <- get bh
- b <- get bh
- return (IfaceECase a b)
- _ -> panic ("get IfaceExpr " ++ show h)
+data IfaceIdDetails
+ = IfVanillaId
+ | IfRecSelId IfaceTyCon Bool
+ | IfDFunId Int -- Number of silent args
-data IfaceTickish
- = IfaceHpcTick Module Int -- from HpcTick x
- | IfaceSCC CostCentre Bool Bool -- from ProfNote
- -- no breakpoints: we never export these into interface files
+\end{code}
-instance Binary IfaceTickish where
- put_ bh (IfaceHpcTick m ix) = do
- putByte bh 0
- put_ bh m
- put_ bh ix
- put_ bh (IfaceSCC cc tick push) = do
- putByte bh 1
- put_ bh cc
- put_ bh tick
- put_ bh push
-
- get bh = do
- h <- getByte bh
- case h of
- 0 -> do m <- get bh
- ix <- get bh
- return (IfaceHpcTick m ix)
- 1 -> do cc <- get bh
- tick <- get bh
- push <- get bh
- return (IfaceSCC cc tick push)
- _ -> panic ("get IfaceTickish " ++ show h)
-
-type IfaceAlt = (IfaceConAlt, [IfLclName], IfaceExpr)
- -- Note: IfLclName, not IfaceBndr (and same with the case binder)
- -- We reconstruct the kind/type of the thing from the context
- -- thus saving bulk in interface files
-
-data IfaceConAlt = IfaceDefault
- | IfaceDataAlt IfExtName
- | IfaceLitAlt Literal
-
-instance Binary IfaceConAlt where
- put_ bh IfaceDefault = putByte bh 0
- put_ bh (IfaceDataAlt aa) = putByte bh 1 >> put_ bh aa
- put_ bh (IfaceLitAlt ac) = putByte bh 2 >> put_ bh ac
- get bh = do
- h <- getByte bh
- case h of
- 0 -> return IfaceDefault
- 1 -> liftM IfaceDataAlt $ get bh
- _ -> liftM IfaceLitAlt $ get bh
-
-data IfaceBinding
- = IfaceNonRec IfaceLetBndr IfaceExpr
- | IfaceRec [(IfaceLetBndr, IfaceExpr)]
-
-instance Binary IfaceBinding where
- put_ bh (IfaceNonRec aa ab) = putByte bh 0 >> put_ bh aa >> put_ bh ab
- put_ bh (IfaceRec ac) = putByte bh 1 >> put_ bh ac
- get bh = do
- h <- getByte bh
- case h of
- 0 -> do { aa <- get bh; ab <- get bh; return (IfaceNonRec aa ab) }
- _ -> do { ac <- get bh; return (IfaceRec ac) }
-
--- IfaceLetBndr is like IfaceIdBndr, but has IdInfo too
--- It's used for *non-top-level* let/rec binders
--- See Note [IdInfo on nested let-bindings]
-data IfaceLetBndr = IfLetBndr IfLclName IfaceType IfaceIdInfo
-
-instance Binary IfaceLetBndr where
- put_ bh (IfLetBndr a b c) = do
- put_ bh a
- put_ bh b
- put_ bh c
- get bh = do a <- get bh
- b <- get bh
- c <- get bh
- return (IfLetBndr a b c)
-\end{code}
-
-Note [Empty case alternatives]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In IfaceSyn an IfaceCase does not record the types of the alternatives,
-unlike CorSyn Case. But we need this type if the alternatives are empty.
-Hence IfaceECase. See Note [Empty case alternatives] in CoreSyn.
-
-Note [Expose recursive functions]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-For supercompilation we want to put *all* unfoldings in the interface
-file, even for functions that are recursive (or big). So we need to
-know when an unfolding belongs to a loop-breaker so that we can refrain
-from inlining it (except during supercompilation).
-
-Note [IdInfo on nested let-bindings]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Occasionally we want to preserve IdInfo on nested let bindings. The one
-that came up was a NOINLINE pragma on a let-binding inside an INLINE
-function. The user (Duncan Coutts) really wanted the NOINLINE control
-to cross the separate compilation boundary.
-
-In general we retain all info that is left by CoreTidy.tidyLetBndr, since
-that is what is seen by importing module with --make
Note [Orphans]: the ifInstOrph and ifRuleOrph fields
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -948,10 +383,22 @@ Note [Versioning of instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See [http://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/RecompilationAvoidance#Instances]
+
+%************************************************************************
+%* *
+ Functions over declarations
+%* *
+%************************************************************************
+
\begin{code}
--- -----------------------------------------------------------------------------
--- Utils on IfaceSyn
+visibleIfConDecls :: IfaceConDecls -> [IfaceConDecl]
+visibleIfConDecls (IfAbstractTyCon {}) = []
+visibleIfConDecls IfDataFamTyCon = []
+visibleIfConDecls (IfDataTyCon cs) = cs
+visibleIfConDecls (IfNewTyCon c) = [c]
+\end{code}
+\begin{code}
ifaceDeclImplicitBndrs :: IfaceDecl -> [OccName]
-- *Excludes* the 'main' name, but *includes* the implicitly-bound names
-- Deeply revolting, because it has to predict what gets bound,
@@ -1014,11 +461,6 @@ ifaceDeclImplicitBndrs (IfaceClass {ifCtxt = sc_ctxt, ifName = cls_tc_occ,
dc_occ = mkClassDataConOcc cls_tc_occ
is_newtype = n_sigs + n_ctxt == 1 -- Sigh
-ifaceDeclImplicitBndrs (IfacePatSyn{ ifName = ps_occ, ifPatHasWrapper = has_wrapper })
- = [wrap_occ | has_wrapper]
- where
- wrap_occ = mkDataConWrapperOcc ps_occ -- Id namespace
-
ifaceDeclImplicitBndrs _ = []
-- -----------------------------------------------------------------------------
@@ -1037,159 +479,418 @@ ifaceDeclFingerprints hash decl
computeFingerprint' =
unsafeDupablePerformIO
. computeFingerprint (panic "ifaceDeclFingerprints")
+\end{code}
------------------------------ Printing IfaceDecl ------------------------------
+%************************************************************************
+%* *
+ Expressions
+%* *
+%************************************************************************
-instance Outputable IfaceDecl where
- ppr = pprIfaceDecl
+\begin{code}
+data IfaceExpr
+ = IfaceLcl IfLclName
+ | IfaceExt IfExtName
+ | IfaceType IfaceType
+ | IfaceCo IfaceCoercion
+ | IfaceTuple TupleSort [IfaceExpr] -- Saturated; type arguments omitted
+ | IfaceLam IfaceBndr IfaceExpr
+ | IfaceApp IfaceExpr IfaceExpr
+ | IfaceCase IfaceExpr IfLclName [IfaceAlt]
+ | IfaceECase IfaceExpr IfaceType -- See Note [Empty case alternatives]
+ | IfaceLet IfaceBinding IfaceExpr
+ | IfaceCast IfaceExpr IfaceCoercion
+ | IfaceLit Literal
+ | IfaceFCall ForeignCall IfaceType
+ | IfaceTick IfaceTickish IfaceExpr -- from Tick tickish E
+
+data IfaceTickish
+ = IfaceHpcTick Module Int -- from HpcTick x
+ | IfaceSCC CostCentre Bool Bool -- from ProfNote
+ | IfaceSource RealSrcSpan String -- from SourceNote
+ -- no breakpoints: we never export these into interface files
-pprIfaceDecl :: IfaceDecl -> SDoc
-pprIfaceDecl (IfaceId {ifName = var, ifType = ty,
- ifIdDetails = details, ifIdInfo = info})
- = sep [ ppr var <+> dcolon <+> ppr ty,
- nest 2 (ppr details),
- nest 2 (ppr info) ]
+type IfaceAlt = (IfaceConAlt, [IfLclName], IfaceExpr)
+ -- Note: IfLclName, not IfaceBndr (and same with the case binder)
+ -- We reconstruct the kind/type of the thing from the context
+ -- thus saving bulk in interface files
-pprIfaceDecl (IfaceForeign {ifName = tycon})
- = hsep [ptext (sLit "foreign import type dotnet"), ppr tycon]
+data IfaceConAlt = IfaceDefault
+ | IfaceDataAlt IfExtName
+ | IfaceLitAlt Literal
+
+data IfaceBinding
+ = IfaceNonRec IfaceLetBndr IfaceExpr
+ | IfaceRec [(IfaceLetBndr, IfaceExpr)]
+
+-- IfaceLetBndr is like IfaceIdBndr, but has IdInfo too
+-- It's used for *non-top-level* let/rec binders
+-- See Note [IdInfo on nested let-bindings]
+data IfaceLetBndr = IfLetBndr IfLclName IfaceType IfaceIdInfo
+\end{code}
+
+Note [Empty case alternatives]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+In IfaceSyn an IfaceCase does not record the types of the alternatives,
+unlike CorSyn Case. But we need this type if the alternatives are empty.
+Hence IfaceECase. See Note [Empty case alternatives] in CoreSyn.
+
+Note [Expose recursive functions]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For supercompilation we want to put *all* unfoldings in the interface
+file, even for functions that are recursive (or big). So we need to
+know when an unfolding belongs to a loop-breaker so that we can refrain
+from inlining it (except during supercompilation).
+
+Note [IdInfo on nested let-bindings]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Occasionally we want to preserve IdInfo on nested let bindings. The one
+that came up was a NOINLINE pragma on a let-binding inside an INLINE
+function. The user (Duncan Coutts) really wanted the NOINLINE control
+to cross the separate compilation boundary.
-pprIfaceDecl (IfaceSyn {ifName = tycon,
- ifTyVars = tyvars,
- ifSynRhs = IfaceSynonymTyCon mono_ty})
- = hang (ptext (sLit "type") <+> pprIfaceDeclHead [] tycon tyvars)
- 2 (vcat [equals <+> ppr mono_ty])
+In general we retain all info that is left by CoreTidy.tidyLetBndr, since
+that is what is seen by importing module with --make
+
+
+%************************************************************************
+%* *
+ Printing IfaceDecl
+%* *
+%************************************************************************
-pprIfaceDecl (IfaceSyn {ifName = tycon, ifTyVars = tyvars,
- ifSynRhs = rhs, ifSynKind = kind })
- = hang (ptext (sLit "type family") <+> pprIfaceDeclHead [] tycon tyvars)
- 2 (sep [dcolon <+> ppr kind, parens (pp_rhs rhs)])
+\begin{code}
+pprAxBranch :: SDoc -> IfaceAxBranch -> SDoc
+-- The TyCon might be local (just an OccName), or this might
+-- be a branch for an imported TyCon, so it would be an ExtName
+-- So it's easier to take an SDoc here
+pprAxBranch pp_tc (IfaceAxBranch { ifaxbTyVars = tvs
+ , ifaxbLHS = pat_tys
+ , ifaxbRHS = rhs
+ , ifaxbIncomps = incomps })
+ = hang (pprUserIfaceForAll tvs)
+ 2 (hang pp_lhs 2 (equals <+> ppr rhs))
+ $+$
+ nest 2 maybe_incomps
where
- pp_rhs IfaceOpenSynFamilyTyCon = ptext (sLit "open")
- pp_rhs (IfaceClosedSynFamilyTyCon ax) = ptext (sLit "closed, axiom") <+> ppr ax
- pp_rhs IfaceAbstractClosedSynFamilyTyCon = ptext (sLit "closed, abstract")
- pp_rhs _ = panic "pprIfaceDecl syn"
+ pp_lhs = hang pp_tc 2 (pprParendIfaceTcArgs pat_tys)
+ maybe_incomps = ppUnless (null incomps) $ parens $
+ ptext (sLit "incompatible indices:") <+> ppr incomps
+
+instance Outputable IfaceAnnotation where
+ ppr (IfaceAnnotation target value) = ppr target <+> colon <+> ppr value
-pprIfaceDecl (IfaceData {ifName = tycon, ifCType = cType,
- ifCtxt = context,
- ifTyVars = tyvars, ifRoles = roles, ifCons = condecls,
- ifRec = isrec, ifPromotable = is_prom,
- ifAxiom = mbAxiom})
- = hang (pp_nd <+> pprIfaceDeclHead context tycon tyvars)
- 2 (vcat [ pprCType cType
- , pprRoles roles
- , pprRec isrec <> comma <+> pp_prom
- , pp_condecls tycon condecls
- , pprAxiom mbAxiom])
+instance HasOccName IfaceClassOp where
+ occName (IfaceClassOp n _ _) = n
+
+instance HasOccName IfaceConDecl where
+ occName = ifConOcc
+
+instance HasOccName IfaceDecl where
+ occName = ifName
+
+instance Outputable IfaceDecl where
+ ppr = pprIfaceDecl showAll
+
+data ShowSub
+ = ShowSub
+ { ss_ppr_bndr :: OccName -> SDoc -- Pretty-printer for binders in IfaceDecl
+ -- See Note [Printing IfaceDecl binders]
+ , ss_how_much :: ShowHowMuch }
+
+data ShowHowMuch
+ = ShowHeader -- Header information only, not rhs
+ | ShowSome [OccName] -- [] <=> Print all sub-components
+ -- (n:ns) <=> print sub-component 'n' with ShowSub=ns
+ -- elide other sub-components to "..."
+ -- May 14: the list is max 1 element long at the moment
+ | ShowIface -- Everything including GHC-internal information (used in --show-iface)
+
+showAll :: ShowSub
+showAll = ShowSub { ss_how_much = ShowIface, ss_ppr_bndr = ppr }
+
+ppShowIface :: ShowSub -> SDoc -> SDoc
+ppShowIface (ShowSub { ss_how_much = ShowIface }) doc = doc
+ppShowIface _ _ = empty
+
+ppShowRhs :: ShowSub -> SDoc -> SDoc
+ppShowRhs (ShowSub { ss_how_much = ShowHeader }) _ = empty
+ppShowRhs _ doc = doc
+
+showSub :: HasOccName n => ShowSub -> n -> Bool
+showSub (ShowSub { ss_how_much = ShowHeader }) _ = False
+showSub (ShowSub { ss_how_much = ShowSome (n:_) }) thing = n == occName thing
+showSub (ShowSub { ss_how_much = _ }) _ = True
+\end{code}
+
+Note [Printing IfaceDecl binders]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The binders in an IfaceDecl are just OccNames, so we don't know what module they
+come from. But when we pretty-print a TyThing by converting to an IfaceDecl
+(see PprTyThing), the TyThing may come from some other module so we really need
+the module qualifier. We solve this by passing in a pretty-printer for the
+binders.
+
+When printing an interface file (--show-iface), we want to print
+everything unqualified, so we can just print the OccName directly.
+
+\begin{code}
+ppr_trim :: [Maybe SDoc] -> [SDoc]
+-- Collapse a group of Nothings to a single "..."
+ppr_trim xs
+ = snd (foldr go (False, []) xs)
where
- pp_prom | is_prom = ptext (sLit "Promotable")
- | otherwise = ptext (sLit "Not promotable")
+ go (Just doc) (_, so_far) = (False, doc : so_far)
+ go Nothing (True, so_far) = (True, so_far)
+ go Nothing (False, so_far) = (True, ptext (sLit "...") : so_far)
+
+isIfaceDataInstance :: IfaceTyConParent -> Bool
+isIfaceDataInstance IfNoParent = False
+isIfaceDataInstance _ = True
+
+pprIfaceDecl :: ShowSub -> IfaceDecl -> SDoc
+-- NB: pprIfaceDecl is also used for pretty-printing TyThings in GHCi
+-- See Note [Pretty-printing TyThings] in PprTyThing
+pprIfaceDecl ss (IfaceData { ifName = tycon, ifCType = ctype,
+ ifCtxt = context, ifTyVars = tc_tyvars,
+ ifRoles = roles, ifCons = condecls,
+ ifParent = parent, ifRec = isrec,
+ ifGadtSyntax = gadt,
+ ifPromotable = is_prom })
+
+ | gadt_style = vcat [ pp_roles
+ , pp_nd <+> pp_lhs <+> pp_where
+ , nest 2 (vcat pp_cons)
+ , nest 2 $ ppShowIface ss pp_extra ]
+ | otherwise = vcat [ pp_roles
+ , hang (pp_nd <+> pp_lhs) 2 (add_bars pp_cons)
+ , nest 2 $ ppShowIface ss pp_extra ]
+ where
+ is_data_instance = isIfaceDataInstance parent
+
+ gadt_style = gadt || any (not . isVanillaIfaceConDecl) cons
+ cons = visibleIfConDecls condecls
+ pp_where = ppWhen (gadt_style && not (null cons)) $ ptext (sLit "where")
+ pp_cons = ppr_trim (map show_con cons) :: [SDoc]
+
+ pp_lhs = case parent of
+ IfNoParent -> pprIfaceDeclHead context ss tycon tc_tyvars
+ _ -> ptext (sLit "instance") <+> pprIfaceTyConParent parent
+
+ pp_roles
+ | is_data_instance = empty
+ | otherwise = pprRoles (== Representational) (pprPrefixIfDeclBndr ss tycon)
+ tc_tyvars roles
+ -- Don't display roles for data family instances (yet)
+ -- See discussion on Trac #8672.
+
+ add_bars [] = empty
+ add_bars (c:cs) = sep ((equals <+> c) : map (char '|' <+>) cs)
+
+ ok_con dc = showSub ss dc || any (showSub ss) (ifConFields dc)
+
+ show_con dc
+ | ok_con dc = Just $ pprIfaceConDecl ss gadt_style mk_user_con_res_ty dc
+ | otherwise = Nothing
+
+ mk_user_con_res_ty :: IfaceEqSpec -> ([IfaceTvBndr], SDoc)
+ -- See Note [Result type of a data family GADT]
+ mk_user_con_res_ty eq_spec
+ | IfDataInstance _ tc tys <- parent
+ = (con_univ_tvs, pprIfaceType (IfaceTyConApp tc (substIfaceTcArgs gadt_subst tys)))
+ | otherwise
+ = (con_univ_tvs, sdocWithDynFlags (ppr_tc_app gadt_subst))
+ where
+ gadt_subst = mkFsEnv eq_spec
+ done_univ_tv (tv,_) = isJust (lookupFsEnv gadt_subst tv)
+ con_univ_tvs = filterOut done_univ_tv tc_tyvars
+
+ ppr_tc_app gadt_subst dflags
+ = pprPrefixIfDeclBndr ss tycon
+ <+> sep [ pprParendIfaceType (substIfaceTyVar gadt_subst tv)
+ | (tv,_kind) <- stripIfaceKindVars dflags tc_tyvars ]
+
pp_nd = case condecls of
- IfAbstractTyCon dis -> ptext (sLit "abstract") <> parens (ppr dis)
- IfDataFamTyCon -> ptext (sLit "data family")
- IfDataTyCon _ -> ptext (sLit "data")
- IfNewTyCon _ -> ptext (sLit "newtype")
-
-pprIfaceDecl (IfaceClass {ifCtxt = context, ifName = clas, ifTyVars = tyvars,
- ifRoles = roles, ifFDs = fds, ifATs = ats, ifSigs = sigs,
- ifRec = isrec})
- = hang (ptext (sLit "class") <+> pprIfaceDeclHead context clas tyvars <+> pprFundeps fds)
- 2 (vcat [pprRoles roles,
- pprRec isrec,
- sep (map ppr ats),
- sep (map ppr sigs)])
-
-pprIfaceDecl (IfaceAxiom {ifName = name, ifTyCon = tycon, ifAxBranches = branches })
- = hang (ptext (sLit "axiom") <+> ppr name <> colon)
- 2 (vcat $ map (pprAxBranch $ Just tycon) branches)
-
-pprIfaceDecl (IfacePatSyn { ifName = name, ifPatHasWrapper = has_wrap,
- ifPatIsInfix = is_infix,
- ifPatUnivTvs = univ_tvs, ifPatExTvs = ex_tvs,
- ifPatProvCtxt = prov_ctxt, ifPatReqCtxt = req_ctxt,
- ifPatArgs = args,
- ifPatTy = ty })
- = hang (text "pattern" <+> header)
- 4 details
+ IfAbstractTyCon d -> ptext (sLit "abstract") <> ppShowIface ss (parens (ppr d))
+ IfDataFamTyCon -> ptext (sLit "data family")
+ IfDataTyCon _ -> ptext (sLit "data")
+ IfNewTyCon _ -> ptext (sLit "newtype")
+
+ pp_extra = vcat [pprCType ctype, pprRec isrec, pp_prom]
+
+ pp_prom | is_prom = ptext (sLit "Promotable")
+ | otherwise = empty
+
+
+pprIfaceDecl ss (IfaceClass { ifATs = ats, ifSigs = sigs, ifRec = isrec
+ , ifCtxt = context, ifName = clas
+ , ifTyVars = tyvars, ifRoles = roles
+ , ifFDs = fds })
+ = vcat [ pprRoles (== Nominal) (pprPrefixIfDeclBndr ss clas) tyvars roles
+ , ptext (sLit "class") <+> pprIfaceDeclHead context ss clas tyvars
+ <+> pprFundeps fds <+> pp_where
+ , nest 2 (vcat [vcat asocs, vcat dsigs, pprec])]
+ where
+ pp_where = ppShowRhs ss $ ppUnless (null sigs && null ats) (ptext (sLit "where"))
+
+ asocs = ppr_trim $ map maybeShowAssoc ats
+ dsigs = ppr_trim $ map maybeShowSig sigs
+ pprec = ppShowIface ss (pprRec isrec)
+
+ maybeShowAssoc :: IfaceAT -> Maybe SDoc
+ maybeShowAssoc asc@(IfaceAT d _)
+ | showSub ss d = Just $ pprIfaceAT ss asc
+ | otherwise = Nothing
+
+ maybeShowSig :: IfaceClassOp -> Maybe SDoc
+ maybeShowSig sg
+ | showSub ss sg = Just $ pprIfaceClassOp ss sg
+ | otherwise = Nothing
+
+pprIfaceDecl ss (IfaceSyn { ifName = tc
+ , ifTyVars = tv
+ , ifSynRhs = IfaceSynonymTyCon mono_ty })
+ = hang (ptext (sLit "type") <+> pprIfaceDeclHead [] ss tc tv <+> equals)
+ 2 (sep [pprIfaceForAll tvs, pprIfaceContextArr theta, ppr tau])
+ where
+ (tvs, theta, tau) = splitIfaceSigmaTy mono_ty
+
+pprIfaceDecl ss (IfaceSyn { ifName = tycon, ifTyVars = tyvars
+ , ifSynRhs = rhs, ifSynKind = kind })
+ = vcat [ hang (text "type family" <+> pprIfaceDeclHead [] ss tycon tyvars <+> dcolon)
+ 2 (ppr kind <+> ppShowRhs ss (pp_rhs rhs))
+ , ppShowRhs ss (nest 2 (pp_branches rhs)) ]
where
- header = ppr name <+> dcolon <+>
- (pprIfaceForAllPart univ_tvs req_ctxt $
- pprIfaceForAllPart ex_tvs prov_ctxt $
- pp_tau)
+ pp_rhs IfaceOpenSynFamilyTyCon = ppShowIface ss (ptext (sLit "open"))
+ pp_rhs IfaceAbstractClosedSynFamilyTyCon = ppShowIface ss (ptext (sLit "closed, abstract"))
+ pp_rhs (IfaceClosedSynFamilyTyCon _ (_:_)) = ptext (sLit "where")
+ pp_rhs IfaceBuiltInSynFamTyCon = ppShowIface ss (ptext (sLit "built-in"))
+ pp_rhs _ = panic "pprIfaceDecl syn"
- details = sep [ if is_infix then text "Infix" else empty
- , if has_wrap then text "HasWrapper" else empty
- ]
+ pp_branches (IfaceClosedSynFamilyTyCon ax brs)
+ = vcat (map (pprAxBranch (pprPrefixIfDeclBndr ss tycon)) brs)
+ $$ ppShowIface ss (ptext (sLit "axiom") <+> ppr ax)
+ pp_branches _ = empty
+
+pprIfaceDecl _ (IfacePatSyn { ifName = name, ifPatWrapper = wrapper,
+ ifPatIsInfix = is_infix,
+ ifPatUnivTvs = _univ_tvs, ifPatExTvs = _ex_tvs,
+ ifPatProvCtxt = prov_ctxt, ifPatReqCtxt = req_ctxt,
+ ifPatArgs = args,
+ ifPatTy = ty })
+ = pprPatSynSig name has_wrap args' ty' (pprCtxt prov_ctxt) (pprCtxt req_ctxt)
+ where
+ has_wrap = isJust wrapper
+ args' = case (is_infix, args) of
+ (True, [left_ty, right_ty]) ->
+ InfixPatSyn (pprParendIfaceType left_ty) (pprParendIfaceType right_ty)
+ (_, tys) ->
+ PrefixPatSyn (map pprParendIfaceType tys)
+
+ ty' = pprParendIfaceType ty
+
+ pprCtxt [] = Nothing
+ pprCtxt ctxt = Just $ pprIfaceContext ctxt
+
+pprIfaceDecl ss (IfaceId { ifName = var, ifType = ty,
+ ifIdDetails = details, ifIdInfo = info })
+ = vcat [ hang (pprPrefixIfDeclBndr ss var <+> dcolon)
+ 2 (pprIfaceSigmaType ty)
+ , ppShowIface ss (ppr details)
+ , ppShowIface ss (ppr info) ]
+
+pprIfaceDecl _ (IfaceForeign {ifName = tycon})
+ = hsep [ptext (sLit "foreign import type dotnet"), ppr tycon]
- pp_tau = case map pprParendIfaceType (arg_tys ++ [ty]) of
- (t:ts) -> fsep (t : map (arrow <+>) ts)
- [] -> panic "pp_tau"
+pprIfaceDecl _ (IfaceAxiom { ifName = name, ifTyCon = tycon
+ , ifAxBranches = branches })
+ = hang (ptext (sLit "axiom") <+> ppr name <> dcolon)
+ 2 (vcat $ map (pprAxBranch (ppr tycon)) branches)
- arg_tys = map snd args
pprCType :: Maybe CType -> SDoc
-pprCType Nothing = ptext (sLit "No C type associated")
+pprCType Nothing = empty
pprCType (Just cType) = ptext (sLit "C type:") <+> ppr cType
-pprRoles :: [Role] -> SDoc
-pprRoles [] = empty
-pprRoles roles = text "Roles:" <+> ppr roles
+-- If suppress_if returns True for every role in the list, suppress the
+-- "type role" annotation output entirely.
+pprRoles :: (Role -> Bool) -> SDoc -> [IfaceTvBndr] -> [Role] -> SDoc
+pprRoles suppress_if tyCon tyvars roles
+ = sdocWithDynFlags $ \dflags ->
+ let froles = suppressIfaceKinds dflags tyvars roles
+ in ppUnless (all suppress_if roles || null froles) $
+ ptext (sLit "type role") <+> tyCon <+> hsep (map ppr froles)
pprRec :: RecFlag -> SDoc
-pprRec isrec = ptext (sLit "RecFlag") <+> ppr isrec
+pprRec NonRecursive = empty
+pprRec Recursive = ptext (sLit "RecFlag: Recursive")
-pprAxiom :: Maybe Name -> SDoc
-pprAxiom Nothing = ptext (sLit "FamilyInstance: none")
-pprAxiom (Just ax) = ptext (sLit "FamilyInstance:") <+> ppr ax
+pprInfixIfDeclBndr, pprPrefixIfDeclBndr :: ShowSub -> OccName -> SDoc
+pprInfixIfDeclBndr (ShowSub { ss_ppr_bndr = ppr_bndr }) occ
+ = pprInfixVar (isSymOcc occ) (ppr_bndr occ)
+pprPrefixIfDeclBndr (ShowSub { ss_ppr_bndr = ppr_bndr }) occ
+ = parenSymOcc occ (ppr_bndr occ)
instance Outputable IfaceClassOp where
- ppr (IfaceClassOp n dm ty) = ppr n <+> ppr dm <+> dcolon <+> ppr ty
+ ppr = pprIfaceClassOp showAll
+
+pprIfaceClassOp :: ShowSub -> IfaceClassOp -> SDoc
+pprIfaceClassOp ss (IfaceClassOp n dm ty) = hang opHdr 2 (pprIfaceSigmaType ty)
+ where opHdr = pprPrefixIfDeclBndr ss n
+ <+> ppShowIface ss (ppr dm) <+> dcolon
instance Outputable IfaceAT where
- ppr (IfaceAT d defs)
- = vcat [ ppr d
- , ppUnless (null defs) $ nest 2 $
- ptext (sLit "Defaults:") <+> vcat (map ppr defs) ]
-
-pprIfaceDeclHead :: IfaceContext -> OccName -> [IfaceTvBndr] -> SDoc
-pprIfaceDeclHead context thing tyvars
- = hsep [pprIfaceContext context, parenSymOcc thing (ppr thing),
- pprIfaceTvBndrs tyvars]
-
-pp_condecls :: OccName -> IfaceConDecls -> SDoc
-pp_condecls _ (IfAbstractTyCon {}) = empty
-pp_condecls _ IfDataFamTyCon = empty
-pp_condecls tc (IfNewTyCon c) = equals <+> pprIfaceConDecl tc c
-pp_condecls tc (IfDataTyCon cs) = equals <+> sep (punctuate (ptext (sLit " |"))
- (map (pprIfaceConDecl tc) cs))
-
-mkIfaceEqPred :: IfaceType -> IfaceType -> IfacePredType
--- IA0_NOTE: This is wrong, but only used for pretty-printing.
-mkIfaceEqPred ty1 ty2 = IfaceTyConApp (IfaceTc eqTyConName) [ty1, ty2]
-
-pprIfaceConDecl :: OccName -> IfaceConDecl -> SDoc
-pprIfaceConDecl tc
- (IfCon { ifConOcc = name, ifConInfix = is_infix, ifConWrapper = has_wrap,
- ifConUnivTvs = univ_tvs, ifConExTvs = ex_tvs,
+ ppr = pprIfaceAT showAll
+
+pprIfaceAT :: ShowSub -> IfaceAT -> SDoc
+pprIfaceAT ss (IfaceAT d mb_def)
+ = vcat [ pprIfaceDecl ss d
+ , case mb_def of
+ Nothing -> empty
+ Just rhs -> nest 2 $
+ ptext (sLit "Default:") <+> ppr rhs ]
+
+instance Outputable IfaceTyConParent where
+ ppr p = pprIfaceTyConParent p
+
+pprIfaceTyConParent :: IfaceTyConParent -> SDoc
+pprIfaceTyConParent IfNoParent
+ = empty
+pprIfaceTyConParent (IfDataInstance _ tc tys)
+ = sdocWithDynFlags $ \dflags ->
+ let ftys = stripKindArgs dflags tys
+ in pprIfaceTypeApp tc ftys
+
+pprIfaceDeclHead :: IfaceContext -> ShowSub -> OccName -> [IfaceTvBndr] -> SDoc
+pprIfaceDeclHead context ss tc_occ tv_bndrs
+ = sdocWithDynFlags $ \ dflags ->
+ sep [ pprIfaceContextArr context
+ , pprPrefixIfDeclBndr ss tc_occ
+ <+> pprIfaceTvBndrs (stripIfaceKindVars dflags tv_bndrs) ]
+
+isVanillaIfaceConDecl :: IfaceConDecl -> Bool
+isVanillaIfaceConDecl (IfCon { ifConExTvs = ex_tvs
+ , ifConEqSpec = eq_spec
+ , ifConCtxt = ctxt })
+ = (null ex_tvs) && (null eq_spec) && (null ctxt)
+
+pprIfaceConDecl :: ShowSub -> Bool
+ -> (IfaceEqSpec -> ([IfaceTvBndr], SDoc))
+ -> IfaceConDecl -> SDoc
+pprIfaceConDecl ss gadt_style mk_user_con_res_ty
+ (IfCon { ifConOcc = name, ifConInfix = is_infix,
+ ifConExTvs = ex_tvs,
ifConEqSpec = eq_spec, ifConCtxt = ctxt, ifConArgTys = arg_tys,
- ifConStricts = strs, ifConFields = fields })
- = sep [main_payload,
- if is_infix then ptext (sLit "Infix") else empty,
- if has_wrap then ptext (sLit "HasWrapper") else empty,
- ppUnless (null strs) $
- nest 2 (ptext (sLit "Stricts:") <+> hsep (map ppr_bang strs)),
- ppUnless (null fields) $
- nest 2 (ptext (sLit "Fields:") <+> hsep (map ppr fields))]
+ ifConStricts = stricts, ifConFields = labels })
+ | gadt_style = pp_prefix_con <+> dcolon <+> ppr_ty
+ | otherwise = ppr_fields tys_w_strs
where
- ppr_bang IfNoBang = char '_' -- Want to see these
- ppr_bang IfStrict = char '!'
- ppr_bang IfUnpack = ptext (sLit "!!")
- ppr_bang (IfUnpackCo co) = ptext (sLit "!!") <> pprParendIfaceCoercion co
-
- main_payload = ppr name <+> dcolon <+>
- pprIfaceForAllPart (univ_tvs ++ ex_tvs) (eq_ctxt ++ ctxt) pp_tau
+ tys_w_strs :: [(IfaceBang, IfaceType)]
+ tys_w_strs = zip stricts arg_tys
+ pp_prefix_con = pprPrefixIfDeclBndr ss name
- eq_ctxt = [(mkIfaceEqPred (IfaceTyVar (occNameFS tv)) ty)
- | (tv,ty) <- eq_spec]
+ (univ_tvs, pp_res_ty) = mk_user_con_res_ty eq_spec
+ ppr_ty = pprIfaceForAllPart (univ_tvs ++ ex_tvs) ctxt pp_tau
-- A bit gruesome this, but we can't form the full con_tau, and ppr it,
-- because we don't have a Name for the tycon, only an OccName
@@ -1197,7 +898,26 @@ pprIfaceConDecl tc
(t:ts) -> fsep (t : map (arrow <+>) ts)
[] -> panic "pp_con_taus"
- pp_res_ty = ppr tc <+> fsep [ppr tv | (tv,_) <- univ_tvs]
+ ppr_bang IfNoBang = ppWhen opt_PprStyle_Debug $ char '_'
+ ppr_bang IfStrict = char '!'
+ ppr_bang IfUnpack = ptext (sLit "{-# UNPACK #-}")
+ ppr_bang (IfUnpackCo co) = ptext (sLit "! {-# UNPACK #-}") <>
+ pprParendIfaceCoercion co
+
+ pprParendBangTy (bang, ty) = ppr_bang bang <> pprParendIfaceType ty
+ pprBangTy (bang, ty) = ppr_bang bang <> ppr ty
+
+ maybe_show_label (lbl,bty)
+ | showSub ss lbl = Just (pprPrefixIfDeclBndr ss lbl <+> dcolon <+> pprBangTy bty)
+ | otherwise = Nothing
+
+ ppr_fields [ty1, ty2]
+ | is_infix && null labels
+ = sep [pprParendBangTy ty1, pprInfixIfDeclBndr ss name, pprParendBangTy ty2]
+ ppr_fields fields
+ | null labels = pp_prefix_con <+> sep (map pprParendBangTy fields)
+ | otherwise = pp_prefix_con <+> (braces $ sep $ punctuate comma $ ppr_trim $
+ map maybe_show_label (zip labels fields))
instance Outputable IfaceRule where
ppr (IfaceRule { ifRuleName = name, ifActivation = act, ifRuleBndrs = bndrs,
@@ -1209,15 +929,15 @@ instance Outputable IfaceRule where
]
instance Outputable IfaceClsInst where
- ppr (IfaceClsInst {ifDFun = dfun_id, ifOFlag = flag,
- ifInstCls = cls, ifInstTys = mb_tcs})
+ ppr (IfaceClsInst { ifDFun = dfun_id, ifOFlag = flag
+ , ifInstCls = cls, ifInstTys = mb_tcs})
= hang (ptext (sLit "instance") <+> ppr flag
<+> ppr cls <+> brackets (pprWithCommas ppr_rough mb_tcs))
2 (equals <+> ppr dfun_id)
instance Outputable IfaceFamInst where
- ppr (IfaceFamInst {ifFamInstFam = fam, ifFamInstTys = mb_tcs,
- ifFamInstAxiom = tycon_ax})
+ ppr (IfaceFamInst { ifFamInstFam = fam, ifFamInstTys = mb_tcs
+ , ifFamInstAxiom = tycon_ax})
= hang (ptext (sLit "family instance") <+>
ppr fam <+> pprWithCommas (brackets . ppr_rough) mb_tcs)
2 (equals <+> ppr tycon_ax)
@@ -1227,6 +947,26 @@ ppr_rough Nothing = dot
ppr_rough (Just tc) = ppr tc
\end{code}
+Note [Result type of a data family GADT]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Consider
+ data family T a
+ data instance T (p,q) where
+ T1 :: T (Int, Maybe c)
+ T2 :: T (Bool, q)
+
+The IfaceDecl actually looks like
+
+ data TPr p q where
+ T1 :: forall p q. forall c. (p~Int,q~Maybe c) => TPr p q
+ T2 :: forall p q. (p~Bool) => TPr p q
+
+To reconstruct the result types for T1 and T2 that we
+want to pretty print, we substitute the eq-spec
+[p->Int, q->Maybe c] in the arg pattern (p,q) to give
+ T (Int, Maybe c)
+Remember that in IfaceSyn, the TyCon and DataCon share the same
+universal type variables.
----------------------------- Printing IfaceExpr ------------------------------------
@@ -1234,6 +974,9 @@ ppr_rough (Just tc) = ppr tc
instance Outputable IfaceExpr where
ppr e = pprIfaceExpr noParens e
+noParens :: SDoc -> SDoc
+noParens pp = pp
+
pprParendIfaceExpr :: IfaceExpr -> SDoc
pprParendIfaceExpr = pprIfaceExpr parens
@@ -1316,6 +1059,8 @@ pprIfaceTickish (IfaceHpcTick m ix)
= braces (text "tick" <+> ppr m <+> ppr ix)
pprIfaceTickish (IfaceSCC cc tick scope)
= braces (pprCostCentreCore cc <+> ppr tick <+> ppr scope)
+pprIfaceTickish (IfaceSource src _names)
+ = braces (text $ showUserRealSpan True src)
------------------
pprIfaceApp :: IfaceExpr -> [SDoc] -> SDoc
@@ -1359,17 +1104,22 @@ instance Outputable IfaceUnfolding where
pprParendIfaceExpr e]
ppr (IfDFunUnfold bs es) = hang (ptext (sLit "DFun:") <+> sep (map ppr bs) <> dot)
2 (sep (map pprParendIfaceExpr es))
+\end{code}
--- -----------------------------------------------------------------------------
--- | Finding the Names in IfaceSyn
+%************************************************************************
+%* *
+ Finding the Names in IfaceSyn
+%* *
+%************************************************************************
--- This is used for dependency analysis in MkIface, so that we
--- fingerprint a declaration before the things that depend on it. It
--- is specific to interface-file fingerprinting in the sense that we
--- don't collect *all* Names: for example, the DFun of an instance is
--- recorded textually rather than by its fingerprint when
--- fingerprinting the instance, so DFuns are not dependencies.
+This is used for dependency analysis in MkIface, so that we
+fingerprint a declaration before the things that depend on it. It
+is specific to interface-file fingerprinting in the sense that we
+don't collect *all* Names: for example, the DFun of an instance is
+recorded textually rather than by its fingerprint when
+fingerprinting the instance, so DFuns are not dependencies.
+\begin{code}
freeNamesIfDecl :: IfaceDecl -> NameSet
freeNamesIfDecl (IfaceId _s t d i) =
freeNamesIfType t &&&
@@ -1379,7 +1129,7 @@ freeNamesIfDecl IfaceForeign{} =
emptyNameSet
freeNamesIfDecl d@IfaceData{} =
freeNamesIfTvBndrs (ifTyVars d) &&&
- maybe emptyNameSet unitNameSet (ifAxiom d) &&&
+ freeNamesIfaceTyConParent (ifParent d) &&&
freeNamesIfContext (ifCtxt d) &&&
freeNamesIfConDecls (ifCons d)
freeNamesIfDecl d@IfaceSyn{} =
@@ -1396,11 +1146,13 @@ freeNamesIfDecl d@IfaceAxiom{} =
freeNamesIfTc (ifTyCon d) &&&
fnList freeNamesIfAxBranch (ifAxBranches d)
freeNamesIfDecl d@IfacePatSyn{} =
+ unitNameSet (ifPatMatcher d) &&&
+ maybe emptyNameSet unitNameSet (ifPatWrapper d) &&&
freeNamesIfTvBndrs (ifPatUnivTvs d) &&&
freeNamesIfTvBndrs (ifPatExTvs d) &&&
freeNamesIfContext (ifPatProvCtxt d) &&&
freeNamesIfContext (ifPatReqCtxt d) &&&
- fnList freeNamesIfType (map snd (ifPatArgs d)) &&&
+ fnList freeNamesIfType (ifPatArgs d) &&&
freeNamesIfType (ifPatTy d)
freeNamesIfAxBranch :: IfaceAxBranch -> NameSet
@@ -1408,7 +1160,7 @@ freeNamesIfAxBranch (IfaceAxBranch { ifaxbTyVars = tyvars
, ifaxbLHS = lhs
, ifaxbRHS = rhs }) =
freeNamesIfTvBndrs tyvars &&&
- fnList freeNamesIfType lhs &&&
+ freeNamesIfTcArgs lhs &&&
freeNamesIfType rhs
freeNamesIfIdDetails :: IfaceIdDetails -> NameSet
@@ -1419,16 +1171,20 @@ freeNamesIfIdDetails _ = emptyNameSet
freeNamesIfSynRhs :: IfaceSynTyConRhs -> NameSet
freeNamesIfSynRhs (IfaceSynonymTyCon ty) = freeNamesIfType ty
freeNamesIfSynRhs IfaceOpenSynFamilyTyCon = emptyNameSet
-freeNamesIfSynRhs (IfaceClosedSynFamilyTyCon ax) = unitNameSet ax
+freeNamesIfSynRhs (IfaceClosedSynFamilyTyCon ax br)
+ = unitNameSet ax &&& fnList freeNamesIfAxBranch br
freeNamesIfSynRhs IfaceAbstractClosedSynFamilyTyCon = emptyNameSet
+freeNamesIfSynRhs IfaceBuiltInSynFamTyCon = emptyNameSet
freeNamesIfContext :: IfaceContext -> NameSet
freeNamesIfContext = fnList freeNamesIfType
freeNamesIfAT :: IfaceAT -> NameSet
-freeNamesIfAT (IfaceAT decl defs)
+freeNamesIfAT (IfaceAT decl mb_def)
= freeNamesIfDecl decl &&&
- fnList freeNamesIfAxBranch defs
+ case mb_def of
+ Nothing -> emptyNameSet
+ Just rhs -> freeNamesIfType rhs
freeNamesIfClsSig :: IfaceClassOp -> NameSet
freeNamesIfClsSig (IfaceClassOp _n _dm ty) = freeNamesIfType ty
@@ -1439,25 +1195,30 @@ freeNamesIfConDecls (IfNewTyCon c) = freeNamesIfConDecl c
freeNamesIfConDecls _ = emptyNameSet
freeNamesIfConDecl :: IfaceConDecl -> NameSet
-freeNamesIfConDecl c =
- freeNamesIfTvBndrs (ifConUnivTvs c) &&&
- freeNamesIfTvBndrs (ifConExTvs c) &&&
- freeNamesIfContext (ifConCtxt c) &&&
- fnList freeNamesIfType (ifConArgTys c) &&&
- fnList freeNamesIfType (map snd (ifConEqSpec c)) -- equality constraints
+freeNamesIfConDecl c
+ = freeNamesIfTvBndrs (ifConExTvs c) &&&
+ freeNamesIfContext (ifConCtxt c) &&&
+ fnList freeNamesIfType (ifConArgTys c) &&&
+ fnList freeNamesIfType (map snd (ifConEqSpec c)) -- equality constraints
freeNamesIfKind :: IfaceType -> NameSet
freeNamesIfKind = freeNamesIfType
+freeNamesIfTcArgs :: IfaceTcArgs -> NameSet
+freeNamesIfTcArgs (ITC_Type t ts) = freeNamesIfType t &&& freeNamesIfTcArgs ts
+freeNamesIfTcArgs (ITC_Kind k ks) = freeNamesIfKind k &&& freeNamesIfTcArgs ks
+freeNamesIfTcArgs ITC_Nil = emptyNameSet
+
freeNamesIfType :: IfaceType -> NameSet
freeNamesIfType (IfaceTyVar _) = emptyNameSet
freeNamesIfType (IfaceAppTy s t) = freeNamesIfType s &&& freeNamesIfType t
freeNamesIfType (IfaceTyConApp tc ts) =
- freeNamesIfTc tc &&& fnList freeNamesIfType ts
+ freeNamesIfTc tc &&& freeNamesIfTcArgs ts
freeNamesIfType (IfaceLitTy _) = emptyNameSet
freeNamesIfType (IfaceForAllTy tv t) =
freeNamesIfTvBndr tv &&& freeNamesIfType t
freeNamesIfType (IfaceFunTy s t) = freeNamesIfType s &&& freeNamesIfType t
+freeNamesIfType (IfaceDFunTy s t) = freeNamesIfType s &&& freeNamesIfType t
freeNamesIfCoercion :: IfaceCoercion -> NameSet
freeNamesIfCoercion (IfaceReflCo _ t) = freeNamesIfType t
@@ -1539,8 +1300,7 @@ freeNamesIfExpr (IfaceCast e co) = freeNamesIfExpr e &&& freeNamesIfCoercion co
freeNamesIfExpr (IfaceTick _ e) = freeNamesIfExpr e
freeNamesIfExpr (IfaceECase e ty) = freeNamesIfExpr e &&& freeNamesIfType ty
freeNamesIfExpr (IfaceCase s _ alts)
- = freeNamesIfExpr s
- &&& fnList fn_alt alts &&& fn_cons alts
+ = freeNamesIfExpr s &&& fnList fn_alt alts &&& fn_cons alts
where
fn_alt (_con,_bs,r) = freeNamesIfExpr r
@@ -1562,7 +1322,7 @@ freeNamesIfExpr (IfaceLet (IfaceRec as) x)
freeNamesIfExpr _ = emptyNameSet
freeNamesIfTc :: IfaceTyCon -> NameSet
-freeNamesIfTc (IfaceTc tc) = unitNameSet tc
+freeNamesIfTc tc = unitNameSet (ifaceTyConName tc)
-- ToDo: shouldn't we include IfaceIntTc & co.?
freeNamesIfRule :: IfaceRule -> NameSet
@@ -1572,13 +1332,18 @@ freeNamesIfRule (IfaceRule { ifRuleBndrs = bs, ifRuleHead = f
fnList freeNamesIfBndr bs &&&
fnList freeNamesIfExpr es &&&
freeNamesIfExpr rhs
-
+
freeNamesIfFamInst :: IfaceFamInst -> NameSet
freeNamesIfFamInst (IfaceFamInst { ifFamInstFam = famName
, ifFamInstAxiom = axName })
= unitNameSet famName &&&
unitNameSet axName
+freeNamesIfaceTyConParent :: IfaceTyConParent -> NameSet
+freeNamesIfaceTyConParent IfNoParent = emptyNameSet
+freeNamesIfaceTyConParent (IfDataInstance ax tc tys)
+ = unitNameSet ax &&& freeNamesIfTc tc &&& freeNamesIfTcArgs tys
+
-- helpers
(&&&) :: NameSet -> NameSet -> NameSet
(&&&) = unionNameSets
@@ -1612,3 +1377,555 @@ Now, lookupModule depends on DynFlags, but the transitive dependency
on the *locally-defined* type PackageState is not visible. We need
to take account of the use of the data constructor PS in the pattern match.
+
+%************************************************************************
+%* *
+ Binary instances
+%* *
+%************************************************************************
+
+\begin{code}
+instance Binary IfaceDecl where
+ put_ bh (IfaceId name ty details idinfo) = do
+ putByte bh 0
+ put_ bh (occNameFS name)
+ put_ bh ty
+ put_ bh details
+ put_ bh idinfo
+
+ put_ _ (IfaceForeign _ _) =
+ error "Binary.put_(IfaceDecl): IfaceForeign"
+
+ put_ bh (IfaceData a1 a2 a3 a4 a5 a6 a7 a8 a9 a10) = do
+ putByte bh 2
+ put_ bh (occNameFS a1)
+ put_ bh a2
+ put_ bh a3
+ put_ bh a4
+ put_ bh a5
+ put_ bh a6
+ put_ bh a7
+ put_ bh a8
+ put_ bh a9
+ put_ bh a10
+
+ put_ bh (IfaceSyn a1 a2 a3 a4 a5) = do
+ putByte bh 3
+ put_ bh (occNameFS a1)
+ put_ bh a2
+ put_ bh a3
+ put_ bh a4
+ put_ bh a5
+
+ put_ bh (IfaceClass a1 a2 a3 a4 a5 a6 a7 a8 a9) = do
+ putByte bh 4
+ put_ bh a1
+ put_ bh (occNameFS a2)
+ put_ bh a3
+ put_ bh a4
+ put_ bh a5
+ put_ bh a6
+ put_ bh a7
+ put_ bh a8
+ put_ bh a9
+
+ put_ bh (IfaceAxiom a1 a2 a3 a4) = do
+ putByte bh 5
+ put_ bh (occNameFS a1)
+ put_ bh a2
+ put_ bh a3
+ put_ bh a4
+
+ put_ bh (IfacePatSyn name a2 a3 a4 a5 a6 a7 a8 a9 a10) = do
+ putByte bh 6
+ put_ bh (occNameFS name)
+ put_ bh a2
+ put_ bh a3
+ put_ bh a4
+ put_ bh a5
+ put_ bh a6
+ put_ bh a7
+ put_ bh a8
+ put_ bh a9
+ put_ bh a10
+
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> do name <- get bh
+ ty <- get bh
+ details <- get bh
+ idinfo <- get bh
+ occ <- return $! mkVarOccFS name
+ return (IfaceId occ ty details idinfo)
+ 1 -> error "Binary.get(TyClDecl): ForeignType"
+ 2 -> do a1 <- get bh
+ a2 <- get bh
+ a3 <- get bh
+ a4 <- get bh
+ a5 <- get bh
+ a6 <- get bh
+ a7 <- get bh
+ a8 <- get bh
+ a9 <- get bh
+ a10 <- get bh
+ occ <- return $! mkTcOccFS a1
+ return (IfaceData occ a2 a3 a4 a5 a6 a7 a8 a9 a10)
+ 3 -> do a1 <- get bh
+ a2 <- get bh
+ a3 <- get bh
+ a4 <- get bh
+ a5 <- get bh
+ occ <- return $! mkTcOccFS a1
+ return (IfaceSyn occ a2 a3 a4 a5)
+ 4 -> do a1 <- get bh
+ a2 <- get bh
+ a3 <- get bh
+ a4 <- get bh
+ a5 <- get bh
+ a6 <- get bh
+ a7 <- get bh
+ a8 <- get bh
+ a9 <- get bh
+ occ <- return $! mkClsOccFS a2
+ return (IfaceClass a1 occ a3 a4 a5 a6 a7 a8 a9)
+ 5 -> do a1 <- get bh
+ a2 <- get bh
+ a3 <- get bh
+ a4 <- get bh
+ occ <- return $! mkTcOccFS a1
+ return (IfaceAxiom occ a2 a3 a4)
+ 6 -> do a1 <- get bh
+ a2 <- get bh
+ a3 <- get bh
+ a4 <- get bh
+ a5 <- get bh
+ a6 <- get bh
+ a7 <- get bh
+ a8 <- get bh
+ a9 <- get bh
+ a10 <- get bh
+ occ <- return $! mkDataOccFS a1
+ return (IfacePatSyn occ a2 a3 a4 a5 a6 a7 a8 a9 a10)
+ _ -> panic (unwords ["Unknown IfaceDecl tag:", show h])
+
+instance Binary IfaceSynTyConRhs where
+ put_ bh IfaceOpenSynFamilyTyCon = putByte bh 0
+ put_ bh (IfaceClosedSynFamilyTyCon ax br) = putByte bh 1 >> put_ bh ax
+ >> put_ bh br
+ put_ bh IfaceAbstractClosedSynFamilyTyCon = putByte bh 2
+ put_ bh (IfaceSynonymTyCon ty) = putByte bh 3 >> put_ bh ty
+ put_ _ IfaceBuiltInSynFamTyCon
+ = pprPanic "Cannot serialize IfaceBuiltInSynFamTyCon, used for pretty-printing only" empty
+
+ get bh = do { h <- getByte bh
+ ; case h of
+ 0 -> return IfaceOpenSynFamilyTyCon
+ 1 -> do { ax <- get bh
+ ; br <- get bh
+ ; return (IfaceClosedSynFamilyTyCon ax br) }
+ 2 -> return IfaceAbstractClosedSynFamilyTyCon
+ _ -> do { ty <- get bh
+ ; return (IfaceSynonymTyCon ty) } }
+
+instance Binary IfaceClassOp where
+ put_ bh (IfaceClassOp n def ty) = do
+ put_ bh (occNameFS n)
+ put_ bh def
+ put_ bh ty
+ get bh = do
+ n <- get bh
+ def <- get bh
+ ty <- get bh
+ occ <- return $! mkVarOccFS n
+ return (IfaceClassOp occ def ty)
+
+instance Binary IfaceAT where
+ put_ bh (IfaceAT dec defs) = do
+ put_ bh dec
+ put_ bh defs
+ get bh = do
+ dec <- get bh
+ defs <- get bh
+ return (IfaceAT dec defs)
+
+instance Binary IfaceAxBranch where
+ put_ bh (IfaceAxBranch a1 a2 a3 a4 a5) = do
+ put_ bh a1
+ put_ bh a2
+ put_ bh a3
+ put_ bh a4
+ put_ bh a5
+ get bh = do
+ a1 <- get bh
+ a2 <- get bh
+ a3 <- get bh
+ a4 <- get bh
+ a5 <- get bh
+ return (IfaceAxBranch a1 a2 a3 a4 a5)
+
+instance Binary IfaceConDecls where
+ put_ bh (IfAbstractTyCon d) = putByte bh 0 >> put_ bh d
+ put_ bh IfDataFamTyCon = putByte bh 1
+ put_ bh (IfDataTyCon cs) = putByte bh 2 >> put_ bh cs
+ put_ bh (IfNewTyCon c) = putByte bh 3 >> put_ bh c
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> liftM IfAbstractTyCon $ get bh
+ 1 -> return IfDataFamTyCon
+ 2 -> liftM IfDataTyCon $ get bh
+ _ -> liftM IfNewTyCon $ get bh
+
+instance Binary IfaceConDecl where
+ put_ bh (IfCon a1 a2 a3 a4 a5 a6 a7 a8 a9) = do
+ put_ bh a1
+ put_ bh a2
+ put_ bh a3
+ put_ bh a4
+ put_ bh a5
+ put_ bh a6
+ put_ bh a7
+ put_ bh a8
+ put_ bh a9
+ get bh = do
+ a1 <- get bh
+ a2 <- get bh
+ a3 <- get bh
+ a4 <- get bh
+ a5 <- get bh
+ a6 <- get bh
+ a7 <- get bh
+ a8 <- get bh
+ a9 <- get bh
+ return (IfCon a1 a2 a3 a4 a5 a6 a7 a8 a9)
+
+instance Binary IfaceBang where
+ put_ bh IfNoBang = putByte bh 0
+ put_ bh IfStrict = putByte bh 1
+ put_ bh IfUnpack = putByte bh 2
+ put_ bh (IfUnpackCo co) = putByte bh 3 >> put_ bh co
+
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> do return IfNoBang
+ 1 -> do return IfStrict
+ 2 -> do return IfUnpack
+ _ -> do { a <- get bh; return (IfUnpackCo a) }
+
+instance Binary IfaceClsInst where
+ put_ bh (IfaceClsInst cls tys dfun flag orph) = do
+ put_ bh cls
+ put_ bh tys
+ put_ bh dfun
+ put_ bh flag
+ put_ bh orph
+ get bh = do
+ cls <- get bh
+ tys <- get bh
+ dfun <- get bh
+ flag <- get bh
+ orph <- get bh
+ return (IfaceClsInst cls tys dfun flag orph)
+
+instance Binary IfaceFamInst where
+ put_ bh (IfaceFamInst fam tys name orph) = do
+ put_ bh fam
+ put_ bh tys
+ put_ bh name
+ put_ bh orph
+ get bh = do
+ fam <- get bh
+ tys <- get bh
+ name <- get bh
+ orph <- get bh
+ return (IfaceFamInst fam tys name orph)
+
+instance Binary IfaceRule where
+ put_ bh (IfaceRule a1 a2 a3 a4 a5 a6 a7 a8) = do
+ put_ bh a1
+ put_ bh a2
+ put_ bh a3
+ put_ bh a4
+ put_ bh a5
+ put_ bh a6
+ put_ bh a7
+ put_ bh a8
+ get bh = do
+ a1 <- get bh
+ a2 <- get bh
+ a3 <- get bh
+ a4 <- get bh
+ a5 <- get bh
+ a6 <- get bh
+ a7 <- get bh
+ a8 <- get bh
+ return (IfaceRule a1 a2 a3 a4 a5 a6 a7 a8)
+
+instance Binary IfaceAnnotation where
+ put_ bh (IfaceAnnotation a1 a2) = do
+ put_ bh a1
+ put_ bh a2
+ get bh = do
+ a1 <- get bh
+ a2 <- get bh
+ return (IfaceAnnotation a1 a2)
+
+instance Binary IfaceIdDetails where
+ put_ bh IfVanillaId = putByte bh 0
+ put_ bh (IfRecSelId a b) = putByte bh 1 >> put_ bh a >> put_ bh b
+ put_ bh (IfDFunId n) = do { putByte bh 2; put_ bh n }
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> return IfVanillaId
+ 1 -> do { a <- get bh; b <- get bh; return (IfRecSelId a b) }
+ _ -> do { n <- get bh; return (IfDFunId n) }
+
+instance Binary IfaceIdInfo where
+ put_ bh NoInfo = putByte bh 0
+ put_ bh (HasInfo i) = putByte bh 1 >> lazyPut bh i -- NB lazyPut
+
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> return NoInfo
+ _ -> liftM HasInfo $ lazyGet bh -- NB lazyGet
+
+instance Binary IfaceInfoItem where
+ put_ bh (HsArity aa) = putByte bh 0 >> put_ bh aa
+ put_ bh (HsStrictness ab) = putByte bh 1 >> put_ bh ab
+ put_ bh (HsUnfold lb ad) = putByte bh 2 >> put_ bh lb >> put_ bh ad
+ put_ bh (HsInline ad) = putByte bh 3 >> put_ bh ad
+ put_ bh HsNoCafRefs = putByte bh 4
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> liftM HsArity $ get bh
+ 1 -> liftM HsStrictness $ get bh
+ 2 -> do lb <- get bh
+ ad <- get bh
+ return (HsUnfold lb ad)
+ 3 -> liftM HsInline $ get bh
+ _ -> return HsNoCafRefs
+
+instance Binary IfaceUnfolding where
+ put_ bh (IfCoreUnfold s e) = do
+ putByte bh 0
+ put_ bh s
+ put_ bh e
+ put_ bh (IfInlineRule a b c d) = do
+ putByte bh 1
+ put_ bh a
+ put_ bh b
+ put_ bh c
+ put_ bh d
+ put_ bh (IfDFunUnfold as bs) = do
+ putByte bh 2
+ put_ bh as
+ put_ bh bs
+ put_ bh (IfCompulsory e) = do
+ putByte bh 3
+ put_ bh e
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> do s <- get bh
+ e <- get bh
+ return (IfCoreUnfold s e)
+ 1 -> do a <- get bh
+ b <- get bh
+ c <- get bh
+ d <- get bh
+ return (IfInlineRule a b c d)
+ 2 -> do as <- get bh
+ bs <- get bh
+ return (IfDFunUnfold as bs)
+ _ -> do e <- get bh
+ return (IfCompulsory e)
+
+
+instance Binary IfaceExpr where
+ put_ bh (IfaceLcl aa) = do
+ putByte bh 0
+ put_ bh aa
+ put_ bh (IfaceType ab) = do
+ putByte bh 1
+ put_ bh ab
+ put_ bh (IfaceCo ab) = do
+ putByte bh 2
+ put_ bh ab
+ put_ bh (IfaceTuple ac ad) = do
+ putByte bh 3
+ put_ bh ac
+ put_ bh ad
+ put_ bh (IfaceLam ae af) = do
+ putByte bh 4
+ put_ bh ae
+ put_ bh af
+ put_ bh (IfaceApp ag ah) = do
+ putByte bh 5
+ put_ bh ag
+ put_ bh ah
+ put_ bh (IfaceCase ai aj ak) = do
+ putByte bh 6
+ put_ bh ai
+ put_ bh aj
+ put_ bh ak
+ put_ bh (IfaceLet al am) = do
+ putByte bh 7
+ put_ bh al
+ put_ bh am
+ put_ bh (IfaceTick an ao) = do
+ putByte bh 8
+ put_ bh an
+ put_ bh ao
+ put_ bh (IfaceLit ap) = do
+ putByte bh 9
+ put_ bh ap
+ put_ bh (IfaceFCall as at) = do
+ putByte bh 10
+ put_ bh as
+ put_ bh at
+ put_ bh (IfaceExt aa) = do
+ putByte bh 11
+ put_ bh aa
+ put_ bh (IfaceCast ie ico) = do
+ putByte bh 12
+ put_ bh ie
+ put_ bh ico
+ put_ bh (IfaceECase a b) = do
+ putByte bh 13
+ put_ bh a
+ put_ bh b
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> do aa <- get bh
+ return (IfaceLcl aa)
+ 1 -> do ab <- get bh
+ return (IfaceType ab)
+ 2 -> do ab <- get bh
+ return (IfaceCo ab)
+ 3 -> do ac <- get bh
+ ad <- get bh
+ return (IfaceTuple ac ad)
+ 4 -> do ae <- get bh
+ af <- get bh
+ return (IfaceLam ae af)
+ 5 -> do ag <- get bh
+ ah <- get bh
+ return (IfaceApp ag ah)
+ 6 -> do ai <- get bh
+ aj <- get bh
+ ak <- get bh
+ return (IfaceCase ai aj ak)
+ 7 -> do al <- get bh
+ am <- get bh
+ return (IfaceLet al am)
+ 8 -> do an <- get bh
+ ao <- get bh
+ return (IfaceTick an ao)
+ 9 -> do ap <- get bh
+ return (IfaceLit ap)
+ 10 -> do as <- get bh
+ at <- get bh
+ return (IfaceFCall as at)
+ 11 -> do aa <- get bh
+ return (IfaceExt aa)
+ 12 -> do ie <- get bh
+ ico <- get bh
+ return (IfaceCast ie ico)
+ 13 -> do a <- get bh
+ b <- get bh
+ return (IfaceECase a b)
+ _ -> panic ("get IfaceExpr " ++ show h)
+
+instance Binary IfaceTickish where
+ put_ bh (IfaceHpcTick m ix) = do
+ putByte bh 0
+ put_ bh m
+ put_ bh ix
+ put_ bh (IfaceSCC cc tick push) = do
+ putByte bh 1
+ put_ bh cc
+ put_ bh tick
+ put_ bh push
+ put_ bh (IfaceSource src name) = do
+ putByte bh 2
+ put_ bh (srcSpanFile src)
+ put_ bh (srcSpanStartLine src)
+ put_ bh (srcSpanStartCol src)
+ put_ bh (srcSpanEndLine src)
+ put_ bh (srcSpanEndCol src)
+ put_ bh name
+
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> do m <- get bh
+ ix <- get bh
+ return (IfaceHpcTick m ix)
+ 1 -> do cc <- get bh
+ tick <- get bh
+ push <- get bh
+ return (IfaceSCC cc tick push)
+ 2 -> do file <- get bh
+ sl <- get bh
+ sc <- get bh
+ el <- get bh
+ ec <- get bh
+ let start = mkRealSrcLoc file sl sc
+ end = mkRealSrcLoc file el ec
+ name <- get bh
+ return (IfaceSource (mkRealSrcSpan start end) name)
+ _ -> panic ("get IfaceTickish " ++ show h)
+
+instance Binary IfaceConAlt where
+ put_ bh IfaceDefault = putByte bh 0
+ put_ bh (IfaceDataAlt aa) = putByte bh 1 >> put_ bh aa
+ put_ bh (IfaceLitAlt ac) = putByte bh 2 >> put_ bh ac
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> return IfaceDefault
+ 1 -> liftM IfaceDataAlt $ get bh
+ _ -> liftM IfaceLitAlt $ get bh
+
+instance Binary IfaceBinding where
+ put_ bh (IfaceNonRec aa ab) = putByte bh 0 >> put_ bh aa >> put_ bh ab
+ put_ bh (IfaceRec ac) = putByte bh 1 >> put_ bh ac
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> do { aa <- get bh; ab <- get bh; return (IfaceNonRec aa ab) }
+ _ -> do { ac <- get bh; return (IfaceRec ac) }
+
+instance Binary IfaceLetBndr where
+ put_ bh (IfLetBndr a b c) = do
+ put_ bh a
+ put_ bh b
+ put_ bh c
+ get bh = do a <- get bh
+ b <- get bh
+ c <- get bh
+ return (IfLetBndr a b c)
+
+instance Binary IfaceTyConParent where
+ put_ bh IfNoParent = putByte bh 0
+ put_ bh (IfDataInstance ax pr ty) = do
+ putByte bh 1
+ put_ bh ax
+ put_ bh pr
+ put_ bh ty
+ get bh = do
+ h <- getByte bh
+ case h of
+ 0 -> return IfNoParent
+ _ -> do
+ ax <- get bh
+ pr <- get bh
+ ty <- get bh
+ return $ IfDataInstance ax pr ty
+\end{code}
diff --git a/compiler/iface/IfaceType.lhs b/compiler/iface/IfaceType.lhs
index 822e3da75a6c..c55edc618513 100644
--- a/compiler/iface/IfaceType.lhs
+++ b/compiler/iface/IfaceType.lhs
@@ -6,47 +6,63 @@
This module defines interface types and binders
\begin{code}
+{-# LANGUAGE CPP #-}
module IfaceType (
IfExtName, IfLclName,
IfaceType(..), IfacePredType, IfaceKind, IfaceTyCon(..), IfaceCoercion(..),
- IfaceTyLit(..),
- IfaceContext, IfaceBndr(..), IfaceTvBndr, IfaceIdBndr,
+ IfaceTyLit(..), IfaceTcArgs(..),
+ IfaceContext, IfaceBndr(..), IfaceTvBndr, IfaceIdBndr,
-- Conversion from Type -> IfaceType
- toIfaceType, toIfaceKind, toIfaceContext,
- toIfaceBndr, toIfaceIdBndr, toIfaceTvBndrs,
- toIfaceTyCon, toIfaceTyCon_name,
+ toIfaceType, toIfaceTypes, toIfaceKind, toIfaceTyVar,
+ toIfaceContext, toIfaceBndr, toIfaceIdBndr,
+ toIfaceTvBndrs, toIfaceTyCon, toIfaceTyCon_name,
+ toIfaceTcArgs,
+
+ -- Conversion from IfaceTcArgs -> IfaceType
+ tcArgsIfaceTypes,
-- Conversion from Coercion -> IfaceCoercion
toIfaceCoercion,
-- Printing
- pprIfaceType, pprParendIfaceType, pprIfaceContext,
+ pprIfaceType, pprParendIfaceType, pprIfaceContext, pprIfaceContextArr,
pprIfaceIdBndr, pprIfaceTvBndr, pprIfaceTvBndrs,
- pprIfaceBndrs,
- tOP_PREC, tYCON_PREC, noParens, maybeParen, pprIfaceForAllPart,
- pprIfaceCoercion, pprParendIfaceCoercion
-
+ pprIfaceBndrs, pprIfaceTcArgs, pprParendIfaceTcArgs,
+ pprIfaceForAllPart, pprIfaceForAll, pprIfaceSigmaType,
+ pprIfaceCoercion, pprParendIfaceCoercion,
+ splitIfaceSigmaTy, pprIfaceTypeApp, pprUserIfaceForAll,
+
+ suppressIfaceKinds,
+ stripIfaceKindVars,
+ stripKindArgs,
+ substIfaceType, substIfaceTyVar, substIfaceTcArgs, mkIfaceTySubst
) where
+#include "HsVersions.h"
+
import Coercion
-import TypeRep hiding( maybeParen )
+import DataCon ( dataConTyCon )
+import TcType
+import DynFlags
+import TypeRep
import Unique( hasKey )
-import TyCon
+import Util ( filterOut, lengthIs, zipWithEqual )
+import TyCon hiding ( pprPromotionQuote )
import CoAxiom
import Id
import Var
+-- import RnEnv( FastStringEnv, mkFsEnv, lookupFsEnv )
import TysWiredIn
import TysPrim
-import PrelNames( funTyConKey )
+import PrelNames( funTyConKey, ipClassName )
import Name
import BasicTypes
import Binary
import Outputable
import FastString
-
-import Control.Monad
+import UniqSet
\end{code}
%************************************************************************
@@ -75,8 +91,9 @@ data IfaceType -- A kind of universal type, used for types and kinds
= IfaceTyVar IfLclName -- Type/coercion variable only, not tycon
| IfaceAppTy IfaceType IfaceType
| IfaceFunTy IfaceType IfaceType
+ | IfaceDFunTy IfaceType IfaceType
| IfaceForAllTy IfaceTvBndr IfaceType
- | IfaceTyConApp IfaceTyCon [IfaceType] -- Not necessarily saturated
+ | IfaceTyConApp IfaceTyCon IfaceTcArgs -- Not necessarily saturated
-- Includes newtypes, synonyms, tuples
| IfaceLitTy IfaceTyLit
@@ -87,9 +104,24 @@ data IfaceTyLit
= IfaceNumTyLit Integer
| IfaceStrTyLit FastString
--- Encodes type constructors, kind constructors
--- coercion constructors, the lot
-newtype IfaceTyCon = IfaceTc { ifaceTyConName :: IfExtName }
+-- See Note [Suppressing kinds]
+-- We use a new list type (rather than [(IfaceType,Bool)], because
+-- it'll be more compact and faster to parse in interface
+-- files. Rather than two bytes and two decisions (nil/cons, and
+-- type/kind) there'll just be one.
+data IfaceTcArgs
+ = ITC_Nil
+ | ITC_Type IfaceType IfaceTcArgs
+ | ITC_Kind IfaceKind IfaceTcArgs
+
+-- Encodes type constructors, kind constructors,
+-- coercion constructors, the lot.
+-- We have to tag them in order to pretty print them
+-- properly.
+data IfaceTyCon
+ = IfaceTc { ifaceTyConName :: IfExtName }
+ | IfacePromotedDataCon { ifaceTyConName :: IfExtName }
+ | IfacePromotedTyCon { ifaceTyConName :: IfExtName }
data IfaceCoercion
= IfaceReflCo Role IfaceType
@@ -129,40 +161,167 @@ splitIfaceSigmaTy ty
= case split_foralls ty of { (tvs, rho) -> (tv:tvs, rho) }
split_foralls rho = ([], rho)
- split_rho (IfaceFunTy ty1 ty2)
- | isIfacePredTy ty1 = case split_rho ty2 of { (ps, tau) -> (ty1:ps, tau) }
+ split_rho (IfaceDFunTy ty1 ty2)
+ = case split_rho ty2 of { (ps, tau) -> (ty1:ps, tau) }
split_rho tau = ([], tau)
+
+suppressIfaceKinds :: DynFlags -> [IfaceTvBndr] -> [a] -> [a]
+suppressIfaceKinds dflags tys xs
+ | gopt Opt_PrintExplicitKinds dflags = xs
+ | otherwise = suppress tys xs
+ where
+ suppress _ [] = []
+ suppress [] a = a
+ suppress (k:ks) a@(_:xs)
+ | isIfaceKindVar k = suppress ks xs
+ | otherwise = a
+
+stripIfaceKindVars :: DynFlags -> [IfaceTvBndr] -> [IfaceTvBndr]
+stripIfaceKindVars dflags tyvars
+ | gopt Opt_PrintExplicitKinds dflags = tyvars
+ | otherwise = filterOut isIfaceKindVar tyvars
+
+isIfaceKindVar :: IfaceTvBndr -> Bool
+isIfaceKindVar (_, IfaceTyConApp tc _) = ifaceTyConName tc == superKindTyConName
+isIfaceKindVar _ = False
+
+ifTyVarsOfType :: IfaceType -> UniqSet IfLclName
+ifTyVarsOfType ty
+ = case ty of
+ IfaceTyVar v -> unitUniqSet v
+ IfaceAppTy fun arg
+ -> ifTyVarsOfType fun `unionUniqSets` ifTyVarsOfType arg
+ IfaceFunTy arg res
+ -> ifTyVarsOfType arg `unionUniqSets` ifTyVarsOfType res
+ IfaceDFunTy arg res
+ -> ifTyVarsOfType arg `unionUniqSets` ifTyVarsOfType res
+ IfaceForAllTy (var,t) ty
+ -> delOneFromUniqSet (ifTyVarsOfType ty) var `unionUniqSets`
+ ifTyVarsOfType t
+ IfaceTyConApp _ args -> ifTyVarsOfArgs args
+ IfaceLitTy _ -> emptyUniqSet
+
+ifTyVarsOfArgs :: IfaceTcArgs -> UniqSet IfLclName
+ifTyVarsOfArgs args = argv emptyUniqSet args
+ where
+ argv vs (ITC_Type t ts) = argv (vs `unionUniqSets` (ifTyVarsOfType t)) ts
+ argv vs (ITC_Kind k ks) = argv (vs `unionUniqSets` (ifTyVarsOfType k)) ks
+ argv vs ITC_Nil = vs
+\end{code}
+
+Substitutions on IfaceType. This is only used during pretty-printing to construct
+the result type of a GADT, and does not deal with binders (eg IfaceForAll), so
+it doesn't need fancy capture stuff.
+
+\begin{code}
+type IfaceTySubst = FastStringEnv IfaceType
+
+mkIfaceTySubst :: [IfaceTvBndr] -> [IfaceType] -> IfaceTySubst
+mkIfaceTySubst tvs tys = mkFsEnv $ zipWithEqual "mkIfaceTySubst" (\(fs,_) ty -> (fs,ty)) tvs tys
+
+substIfaceType :: IfaceTySubst -> IfaceType -> IfaceType
+substIfaceType env ty
+ = go ty
+ where
+ go (IfaceTyVar tv) = substIfaceTyVar env tv
+ go (IfaceAppTy t1 t2) = IfaceAppTy (go t1) (go t2)
+ go (IfaceFunTy t1 t2) = IfaceFunTy (go t1) (go t2)
+ go (IfaceDFunTy t1 t2) = IfaceDFunTy (go t1) (go t2)
+ go ty@(IfaceLitTy {}) = ty
+ go (IfaceTyConApp tc tys) = IfaceTyConApp tc (substIfaceTcArgs env tys)
+ go (IfaceForAllTy {}) = pprPanic "substIfaceType" (ppr ty)
+
+substIfaceTcArgs :: IfaceTySubst -> IfaceTcArgs -> IfaceTcArgs
+substIfaceTcArgs env args
+ = go args
+ where
+ go ITC_Nil = ITC_Nil
+ go (ITC_Type ty tys) = ITC_Type (substIfaceType env ty) (go tys)
+ go (ITC_Kind ty tys) = ITC_Kind (substIfaceType env ty) (go tys)
+
+substIfaceTyVar :: IfaceTySubst -> IfLclName -> IfaceType
+substIfaceTyVar env tv
+ | Just ty <- lookupFsEnv env tv = ty
+ | otherwise = IfaceTyVar tv
\end{code}
%************************************************************************
%* *
- Pretty-printing
+ Functions over IFaceTcArgs
%* *
%************************************************************************
-Precedence
-~~~~~~~~~~
-@ppr_ty@ takes an @Int@ that is the precedence of the context.
-The precedence levels are:
-\begin{description}
-\item[tOP_PREC] No parens required.
-\item[fUN_PREC] Left hand argument of a function arrow.
-\item[tYCON_PREC] Argument of a type constructor.
-\end{description}
\begin{code}
-tOP_PREC, fUN_PREC, tYCON_PREC :: Int
-tOP_PREC = 0 -- type in ParseIface.y
-fUN_PREC = 1 -- btype in ParseIface.y
-tYCON_PREC = 2 -- atype in ParseIface.y
-
-noParens :: SDoc -> SDoc
-noParens pp = pp
-
-maybeParen :: Int -> Int -> SDoc -> SDoc
-maybeParen ctxt_prec inner_prec pretty
- | ctxt_prec < inner_prec = pretty
- | otherwise = parens pretty
+stripKindArgs :: DynFlags -> IfaceTcArgs -> IfaceTcArgs
+stripKindArgs dflags tys
+ | gopt Opt_PrintExplicitKinds dflags = tys
+ | otherwise = suppressKinds tys
+ where
+ suppressKinds c
+ = case c of
+ ITC_Kind _ ts -> suppressKinds ts
+ _ -> c
+
+toIfaceTcArgs :: TyCon -> [Type] -> IfaceTcArgs
+-- See Note [Suppressing kinds]
+toIfaceTcArgs tc ty_args
+ = go (tyConKind tc) ty_args
+ where
+ go _ [] = ITC_Nil
+ go (ForAllTy _ res) (t:ts) = ITC_Kind (toIfaceKind t) (go res ts)
+ go (FunTy _ res) (t:ts) = ITC_Type (toIfaceType t) (go res ts)
+ go kind (t:ts) = WARN( True, ppr tc $$ ppr (tyConKind tc) $$ ppr ty_args )
+ ITC_Type (toIfaceType t) (go kind ts) -- Ill-kinded
+
+tcArgsIfaceTypes :: IfaceTcArgs -> [IfaceType]
+tcArgsIfaceTypes ITC_Nil = []
+tcArgsIfaceTypes (ITC_Kind t ts) = t : tcArgsIfaceTypes ts
+tcArgsIfaceTypes (ITC_Type t ts) = t : tcArgsIfaceTypes ts
+\end{code}
+
+Note [Suppressing kinds]
+~~~~~~~~~~~~~~~~~~~~~~~~
+We use the IfaceTcArgs to specify which of the arguments to a type
+constructor instantiate a for-all, and which are regular kind args.
+This in turn used to control kind-suppression when printing types,
+under the control of -fprint-explicit-kinds. See also TypeRep.suppressKinds.
+For example, given
+ T :: forall k. (k->*) -> k -> * -- Ordinary kind polymorphism
+ 'Just :: forall k. k -> 'Maybe k -- Promoted
+we want
+ T * Tree Int prints as T Tree Int
+ 'Just * prints as Just *
+
+
+%************************************************************************
+%* *
+ Functions over IFaceTyCon
+%* *
+%************************************************************************
+
+\begin{code}
+--isPromotedIfaceTyCon :: IfaceTyCon -> Bool
+--isPromotedIfaceTyCon (IfacePromotedTyCon _) = True
+--isPromotedIfaceTyCon _ = False
+\end{code}
+%************************************************************************
+%* *
+ Pretty-printing
+%* *
+%************************************************************************
+
+\begin{code}
+pprIfaceInfixApp :: (TyPrec -> a -> SDoc) -> TyPrec -> SDoc -> a -> a -> SDoc
+pprIfaceInfixApp pp p pp_tc ty1 ty2
+ = maybeParen p FunPrec $
+ sep [pp FunPrec ty1, pprInfixVar True pp_tc <+> pp FunPrec ty2]
+
+pprIfacePrefixApp :: TyPrec -> SDoc -> [SDoc] -> SDoc
+pprIfacePrefixApp p pp_fun pp_tys
+ | null pp_tys = pp_fun
+ | otherwise = maybeParen p TyConPrec $
+ hang pp_fun 2 (sep pp_tys)
\end{code}
@@ -180,9 +339,9 @@ pprIfaceIdBndr :: (IfLclName, IfaceType) -> SDoc
pprIfaceIdBndr (name, ty) = hsep [ppr name, dcolon, ppr ty]
pprIfaceTvBndr :: IfaceTvBndr -> SDoc
-pprIfaceTvBndr (tv, IfaceTyConApp tc [])
+pprIfaceTvBndr (tv, IfaceTyConApp tc ITC_Nil)
| ifaceTyConName tc == liftedTypeKindTyConName = ppr tv
-pprIfaceTvBndr (tv, kind) = parens (ppr tv <> dcolon <> ppr kind)
+pprIfaceTvBndr (tv, kind) = parens (ppr tv <+> dcolon <+> ppr kind)
pprIfaceTvBndrs :: [IfaceTvBndr] -> SDoc
pprIfaceTvBndrs tyvars = sep (map pprIfaceTvBndr tyvars)
@@ -211,106 +370,200 @@ instance Outputable IfaceType where
ppr ty = pprIfaceType ty
pprIfaceType, pprParendIfaceType ::IfaceType -> SDoc
-pprIfaceType = ppr_ty tOP_PREC
-pprParendIfaceType = ppr_ty tYCON_PREC
+pprIfaceType = ppr_ty TopPrec
+pprParendIfaceType = ppr_ty TyConPrec
-isIfacePredTy :: IfaceType -> Bool
-isIfacePredTy _ = False
--- FIXME: fix this to print iface pred tys correctly
--- isIfacePredTy ty = isConstraintKind (ifaceTypeKind ty)
-
-ppr_ty :: Int -> IfaceType -> SDoc
+ppr_ty :: TyPrec -> IfaceType -> SDoc
ppr_ty _ (IfaceTyVar tyvar) = ppr tyvar
-ppr_ty ctxt_prec (IfaceTyConApp tc tys) = ppr_tc_app ppr_ty ctxt_prec tc tys
-
-ppr_ty _ (IfaceLitTy n) = ppr_tylit n
-
+ppr_ty ctxt_prec (IfaceTyConApp tc tys) = sdocWithDynFlags (pprTyTcApp ctxt_prec tc tys)
+ppr_ty _ (IfaceLitTy n) = ppr_tylit n
-- Function types
ppr_ty ctxt_prec (IfaceFunTy ty1 ty2)
= -- We don't want to lose synonyms, so we mustn't use splitFunTys here.
- maybeParen ctxt_prec fUN_PREC $
- sep (ppr_ty fUN_PREC ty1 : ppr_fun_tail ty2)
+ maybeParen ctxt_prec FunPrec $
+ sep [ppr_ty FunPrec ty1, sep (ppr_fun_tail ty2)]
where
- arr | isIfacePredTy ty1 = darrow
- | otherwise = arrow
-
ppr_fun_tail (IfaceFunTy ty1 ty2)
- = (arr <+> ppr_ty fUN_PREC ty1) : ppr_fun_tail ty2
+ = (arrow <+> ppr_ty FunPrec ty1) : ppr_fun_tail ty2
ppr_fun_tail other_ty
- = [arr <+> pprIfaceType other_ty]
+ = [arrow <+> pprIfaceType other_ty]
ppr_ty ctxt_prec (IfaceAppTy ty1 ty2)
- = maybeParen ctxt_prec tYCON_PREC $
- ppr_ty fUN_PREC ty1 <+> pprParendIfaceType ty2
+ = maybeParen ctxt_prec TyConPrec $
+ ppr_ty FunPrec ty1 <+> pprParendIfaceType ty2
+
+ppr_ty ctxt_prec ty
+ = maybeParen ctxt_prec FunPrec (ppr_iface_sigma_type True ty)
+
+instance Outputable IfaceTcArgs where
+ ppr tca = pprIfaceTcArgs tca
+
+pprIfaceTcArgs, pprParendIfaceTcArgs :: IfaceTcArgs -> SDoc
+pprIfaceTcArgs = ppr_tc_args TopPrec
+pprParendIfaceTcArgs = ppr_tc_args TyConPrec
-ppr_ty ctxt_prec ty@(IfaceForAllTy _ _)
- = maybeParen ctxt_prec fUN_PREC (pprIfaceForAllPart tvs theta (pprIfaceType tau))
- where
+ppr_tc_args :: TyPrec -> IfaceTcArgs -> SDoc
+ppr_tc_args ctx_prec args
+ = let pprTys t ts = ppr_ty ctx_prec t <+> ppr_tc_args ctx_prec ts
+ in case args of
+ ITC_Nil -> empty
+ ITC_Type t ts -> pprTys t ts
+ ITC_Kind t ts -> pprTys t ts
+
+-------------------
+ppr_iface_sigma_type :: Bool -> IfaceType -> SDoc
+ppr_iface_sigma_type show_foralls_unconditionally ty
+ = ppr_iface_forall_part show_foralls_unconditionally tvs theta (ppr tau)
+ where
(tvs, theta, tau) = splitIfaceSigmaTy ty
- -------------------
--- needs to handle type contexts and coercion contexts, hence the
--- generality
pprIfaceForAllPart :: Outputable a => [IfaceTvBndr] -> [a] -> SDoc -> SDoc
-pprIfaceForAllPart tvs ctxt doc
- = sep [ppr_tvs, pprIfaceContext ctxt, doc]
+pprIfaceForAllPart tvs ctxt sdoc = ppr_iface_forall_part False tvs ctxt sdoc
+
+ppr_iface_forall_part :: Outputable a
+ => Bool -> [IfaceTvBndr] -> [a] -> SDoc -> SDoc
+ppr_iface_forall_part show_foralls_unconditionally tvs ctxt sdoc
+ = sep [ if show_foralls_unconditionally
+ then pprIfaceForAll tvs
+ else pprUserIfaceForAll tvs
+ , pprIfaceContextArr ctxt
+ , sdoc]
+
+pprIfaceForAll :: [IfaceTvBndr] -> SDoc
+pprIfaceForAll [] = empty
+pprIfaceForAll tvs = ptext (sLit "forall") <+> pprIfaceTvBndrs tvs <> dot
+
+pprIfaceSigmaType :: IfaceType -> SDoc
+pprIfaceSigmaType ty = ppr_iface_sigma_type False ty
+
+pprUserIfaceForAll :: [IfaceTvBndr] -> SDoc
+pprUserIfaceForAll tvs
+ = sdocWithDynFlags $ \dflags ->
+ ppWhen (any tv_has_kind_var tvs || gopt Opt_PrintExplicitForalls dflags) $
+ pprIfaceForAll tvs
+ where
+ tv_has_kind_var (_,t) = not (isEmptyUniqSet (ifTyVarsOfType t))
+-------------------
+
+-- See equivalent function in TypeRep.lhs
+pprIfaceTyList :: TyPrec -> IfaceType -> IfaceType -> SDoc
+-- Given a type-level list (t1 ': t2), see if we can print
+-- it in list notation [t1, ...].
+-- Precondition: Opt_PrintExplicitKinds is off
+pprIfaceTyList ctxt_prec ty1 ty2
+ = case gather ty2 of
+ (arg_tys, Nothing)
+ -> char '\'' <> brackets (fsep (punctuate comma
+ (map (ppr_ty TopPrec) (ty1:arg_tys))))
+ (arg_tys, Just tl)
+ -> maybeParen ctxt_prec FunPrec $ hang (ppr_ty FunPrec ty1)
+ 2 (fsep [ colon <+> ppr_ty FunPrec ty | ty <- arg_tys ++ [tl]])
+ where
+ gather :: IfaceType -> ([IfaceType], Maybe IfaceType)
+ -- (gather ty) = (tys, Nothing) means ty is a list [t1, .., tn]
+ -- = (tys, Just tl) means ty is of form t1:t2:...tn:tl
+ gather (IfaceTyConApp tc tys)
+ | tcname == consDataConName
+ , (ITC_Kind _ (ITC_Type ty1 (ITC_Type ty2 ITC_Nil))) <- tys
+ , (args, tl) <- gather ty2
+ = (ty1:args, tl)
+ | tcname == nilDataConName
+ = ([], Nothing)
+ where tcname = ifaceTyConName tc
+ gather ty = ([], Just ty)
+
+pprIfaceTypeApp :: IfaceTyCon -> IfaceTcArgs -> SDoc
+pprIfaceTypeApp tc args = sdocWithDynFlags (pprTyTcApp TopPrec tc args)
+
+pprTyTcApp :: TyPrec -> IfaceTyCon -> IfaceTcArgs -> DynFlags -> SDoc
+pprTyTcApp ctxt_prec tc tys dflags
+ | ifaceTyConName tc == ipClassName
+ , ITC_Type (IfaceLitTy (IfaceStrTyLit n)) (ITC_Type ty ITC_Nil) <- tys
+ = char '?' <> ftext n <> ptext (sLit "::") <> ppr_ty TopPrec ty
+
+ | ifaceTyConName tc == consDataConName
+ , not (gopt Opt_PrintExplicitKinds dflags)
+ , ITC_Kind _ (ITC_Type ty1 (ITC_Type ty2 ITC_Nil)) <- tys
+ = pprIfaceTyList ctxt_prec ty1 ty2
+
+ | otherwise
+ = ppr_iface_tc_app ppr_ty ctxt_prec tc tys_wo_kinds
where
- ppr_tvs | null tvs = empty
- | otherwise = ptext (sLit "forall") <+> pprIfaceTvBndrs tvs <> dot
+ tys_wo_kinds = tcArgsIfaceTypes $ stripKindArgs dflags tys
--------------------
-ppr_tc_app :: (Int -> a -> SDoc) -> Int -> IfaceTyCon -> [a] -> SDoc
-ppr_tc_app _ _ tc [] = ppr_tc tc
-
-
-ppr_tc_app pp _ (IfaceTc n) [ty]
- | n == listTyConName
- = brackets (pp tOP_PREC ty)
- | n == parrTyConName
- = paBrackets (pp tOP_PREC ty)
-ppr_tc_app pp _ (IfaceTc n) tys
- | Just (ATyCon tc) <- wiredInNameTyThing_maybe n
- , Just sort <- tyConTuple_maybe tc
- , tyConArity tc == length tys
- = tupleParens sort (sep (punctuate comma (map (pp tOP_PREC) tys)))
-ppr_tc_app pp ctxt_prec tc tys
- = maybeParen ctxt_prec tYCON_PREC
- (sep [ppr_tc tc, nest 4 (sep (map (pp tYCON_PREC) tys))])
-
-ppr_tc :: IfaceTyCon -> SDoc
--- Wrap infix type constructors in parens
-ppr_tc tc = wrap (ifaceTyConName tc) (ppr tc)
+pprIfaceCoTcApp :: TyPrec -> IfaceTyCon -> [IfaceCoercion] -> SDoc
+pprIfaceCoTcApp ctxt_prec tc tys = ppr_iface_tc_app ppr_co ctxt_prec tc tys
+
+ppr_iface_tc_app :: (TyPrec -> a -> SDoc) -> TyPrec -> IfaceTyCon -> [a] -> SDoc
+ppr_iface_tc_app pp _ tc [ty]
+ | n == listTyConName = pprPromotionQuote tc <> brackets (pp TopPrec ty)
+ | n == parrTyConName = pprPromotionQuote tc <> paBrackets (pp TopPrec ty)
+ where
+ n = ifaceTyConName tc
+
+ppr_iface_tc_app pp ctxt_prec tc tys
+ | Just (tup_sort, tup_args) <- is_tuple
+ = pprPromotionQuote tc <>
+ tupleParens tup_sort (sep (punctuate comma (map (pp TopPrec) tup_args)))
+
+ | not (isSymOcc (nameOccName tc_name))
+ = pprIfacePrefixApp ctxt_prec (ppr tc) (map (pp TyConPrec) tys)
+
+ | [ty1,ty2] <- tys -- Infix, two arguments;
+ -- we know nothing of precedence though
+ = pprIfaceInfixApp pp ctxt_prec (ppr tc) ty1 ty2
+
+ | tc_name == liftedTypeKindTyConName || tc_name == unliftedTypeKindTyConName
+ = ppr tc -- Do not wrap *, # in parens
+
+ | otherwise
+ = pprIfacePrefixApp ctxt_prec (parens (ppr tc)) (map (pp TyConPrec) tys)
where
- -- The kind * does not get wrapped in parens.
- wrap name | name == liftedTypeKindTyConName = id
- wrap name = parenSymOcc (getOccName name)
+ tc_name = ifaceTyConName tc
+
+ is_tuple = case wiredInNameTyThing_maybe tc_name of
+ Just (ATyCon tc)
+ | Just sort <- tyConTuple_maybe tc
+ , tyConArity tc == length tys
+ -> Just (sort, tys)
+
+ | Just dc <- isPromotedDataCon_maybe tc
+ , let dc_tc = dataConTyCon dc
+ , isTupleTyCon dc_tc
+ , let arity = tyConArity dc_tc
+ ty_args = drop arity tys
+ , ty_args `lengthIs` arity
+ -> Just (tupleTyConSort tc, ty_args)
+
+ _ -> Nothing
+
ppr_tylit :: IfaceTyLit -> SDoc
ppr_tylit (IfaceNumTyLit n) = integer n
ppr_tylit (IfaceStrTyLit n) = text (show n)
pprIfaceCoercion, pprParendIfaceCoercion :: IfaceCoercion -> SDoc
-pprIfaceCoercion = ppr_co tOP_PREC
-pprParendIfaceCoercion = ppr_co tYCON_PREC
+pprIfaceCoercion = ppr_co TopPrec
+pprParendIfaceCoercion = ppr_co TyConPrec
-ppr_co :: Int -> IfaceCoercion -> SDoc
+ppr_co :: TyPrec -> IfaceCoercion -> SDoc
ppr_co _ (IfaceReflCo r ty) = angleBrackets (ppr ty) <> ppr_role r
ppr_co ctxt_prec (IfaceFunCo r co1 co2)
- = maybeParen ctxt_prec fUN_PREC $
- sep (ppr_co fUN_PREC co1 : ppr_fun_tail co2)
+ = maybeParen ctxt_prec FunPrec $
+ sep (ppr_co FunPrec co1 : ppr_fun_tail co2)
where
ppr_fun_tail (IfaceFunCo r co1 co2)
- = (arrow <> ppr_role r <+> ppr_co fUN_PREC co1) : ppr_fun_tail co2
+ = (arrow <> ppr_role r <+> ppr_co FunPrec co1) : ppr_fun_tail co2
ppr_fun_tail other_co
= [arrow <> ppr_role r <+> pprIfaceCoercion other_co]
ppr_co _ (IfaceTyConAppCo r tc cos)
- = parens (ppr_tc_app ppr_co tOP_PREC tc cos) <> ppr_role r
+ = parens (pprIfaceCoTcApp TopPrec tc cos) <> ppr_role r
ppr_co ctxt_prec (IfaceAppCo co1 co2)
- = maybeParen ctxt_prec tYCON_PREC $
- ppr_co fUN_PREC co1 <+> pprParendIfaceCoercion co2
+ = maybeParen ctxt_prec TyConPrec $
+ ppr_co FunPrec co1 <+> pprParendIfaceCoercion co2
ppr_co ctxt_prec co@(IfaceForAllCo _ _)
- = maybeParen ctxt_prec fUN_PREC (sep [ppr_tvs, pprIfaceCoercion inner_co])
+ = maybeParen ctxt_prec FunPrec (sep [ppr_tvs, pprIfaceCoercion inner_co])
where
(tvs, inner_co) = split_co co
ppr_tvs = ptext (sLit "forall") <+> pprIfaceTvBndrs tvs <> dot
@@ -322,16 +575,16 @@ ppr_co ctxt_prec co@(IfaceForAllCo _ _)
ppr_co _ (IfaceCoVarCo covar) = ppr covar
ppr_co ctxt_prec (IfaceUnivCo r ty1 ty2)
- = maybeParen ctxt_prec tYCON_PREC $
+ = maybeParen ctxt_prec TyConPrec $
ptext (sLit "UnivCo") <+> ppr r <+>
pprParendIfaceType ty1 <+> pprParendIfaceType ty2
ppr_co ctxt_prec (IfaceInstCo co ty)
- = maybeParen ctxt_prec tYCON_PREC $
+ = maybeParen ctxt_prec TyConPrec $
ptext (sLit "Inst") <+> pprParendIfaceCoercion co <+> pprParendIfaceType ty
ppr_co ctxt_prec (IfaceAxiomRuleCo tc tys cos)
- = maybeParen ctxt_prec tYCON_PREC
+ = maybeParen ctxt_prec TyConPrec
(sep [ppr tc, nest 4 (sep (map pprParendIfaceType tys ++ map pprParendIfaceCoercion cos))])
ppr_co ctxt_prec co
@@ -346,9 +599,9 @@ ppr_co ctxt_prec co
; IfaceSubCo co -> (ptext (sLit "Sub"), [co])
; _ -> panic "pprIfaceCo" }
-ppr_special_co :: Int -> SDoc -> [IfaceCoercion] -> SDoc
+ppr_special_co :: TyPrec -> SDoc -> [IfaceCoercion] -> SDoc
ppr_special_co ctxt_prec doc cos
- = maybeParen ctxt_prec tYCON_PREC
+ = maybeParen ctxt_prec TyConPrec
(sep [doc, nest 4 (sep (map pprParendIfaceCoercion cos))])
ppr_role :: Role -> SDoc
@@ -360,14 +613,30 @@ ppr_role r = underscore <> pp_role
-------------------
instance Outputable IfaceTyCon where
- ppr = ppr . ifaceTyConName
+ ppr tc = pprPromotionQuote tc <> ppr (ifaceTyConName tc)
+
+pprPromotionQuote :: IfaceTyCon -> SDoc
+pprPromotionQuote (IfacePromotedDataCon _ ) = char '\''
+pprPromotionQuote (IfacePromotedTyCon _) = ifPprDebug (char '\'')
+pprPromotionQuote _ = empty
instance Outputable IfaceCoercion where
ppr = pprIfaceCoercion
instance Binary IfaceTyCon where
- put_ bh (IfaceTc ext) = put_ bh ext
- get bh = liftM IfaceTc (get bh)
+ put_ bh tc =
+ case tc of
+ IfaceTc n -> putByte bh 0 >> put_ bh n
+ IfacePromotedDataCon n -> putByte bh 1 >> put_ bh n
+ IfacePromotedTyCon n -> putByte bh 2 >> put_ bh n
+
+ get bh =
+ do tc <- getByte bh
+ case tc of
+ 0 -> get bh >>= return . IfaceTc
+ 1 -> get bh >>= return . IfacePromotedDataCon
+ 2 -> get bh >>= return . IfacePromotedTyCon
+ _ -> panic ("get IfaceTyCon " ++ show tc)
instance Outputable IfaceTyLit where
ppr = ppr_tylit
@@ -385,15 +654,36 @@ instance Binary IfaceTyLit where
; return (IfaceStrTyLit n) }
_ -> panic ("get IfaceTyLit " ++ show tag)
+instance Binary IfaceTcArgs where
+ put_ bh tk =
+ case tk of
+ ITC_Type t ts -> putByte bh 0 >> put_ bh t >> put_ bh ts
+ ITC_Kind t ts -> putByte bh 1 >> put_ bh t >> put_ bh ts
+ ITC_Nil -> putByte bh 2
+
+ get bh =
+ do c <- getByte bh
+ case c of
+ 0 -> do
+ t <- get bh
+ ts <- get bh
+ return $! ITC_Type t ts
+ 1 -> do
+ t <- get bh
+ ts <- get bh
+ return $! ITC_Kind t ts
+ 2 -> return ITC_Nil
+ _ -> panic ("get IfaceTcArgs " ++ show c)
+
-------------------
-pprIfaceContext :: Outputable a => [a] -> SDoc
+pprIfaceContextArr :: Outputable a => [a] -> SDoc
-- Prints "(C a, D b) =>", including the arrow
-pprIfaceContext [] = empty
-pprIfaceContext theta = ppr_preds theta <+> darrow
+pprIfaceContextArr [] = empty
+pprIfaceContextArr theta = pprIfaceContext theta <+> darrow
-ppr_preds :: Outputable a => [a] -> SDoc
-ppr_preds [pred] = ppr pred -- No parens
-ppr_preds preds = parens (sep (punctuate comma (map ppr preds)))
+pprIfaceContext :: Outputable a => [a] -> SDoc
+pprIfaceContext [pred] = ppr pred -- No parens
+pprIfaceContext preds = parens (fsep (punctuate comma (map ppr preds)))
instance Binary IfaceType where
put_ bh (IfaceForAllTy aa ab) = do
@@ -411,6 +701,10 @@ instance Binary IfaceType where
putByte bh 3
put_ bh ag
put_ bh ah
+ put_ bh (IfaceDFunTy ag ah) = do
+ putByte bh 4
+ put_ bh ag
+ put_ bh ah
put_ bh (IfaceTyConApp tc tys)
= do { putByte bh 5; put_ bh tc; put_ bh tys }
@@ -431,9 +725,11 @@ instance Binary IfaceType where
3 -> do ag <- get bh
ah <- get bh
return (IfaceFunTy ag ah)
+ 4 -> do ag <- get bh
+ ah <- get bh
+ return (IfaceDFunTy ag ah)
5 -> do { tc <- get bh; tys <- get bh
; return (IfaceTyConApp tc tys) }
-
30 -> do n <- get bh
return (IfaceLitTy n)
@@ -553,7 +849,7 @@ instance Binary IfaceCoercion where
b <- get bh
c <- get bh
return $ IfaceAxiomRuleCo a b c
- _ -> panic ("get IfaceCoercion " ++ show tag)
+ _ -> panic ("get IfaceCoercion " ++ show tag)
\end{code}
@@ -585,8 +881,10 @@ toIfaceType :: Type -> IfaceType
-- Synonyms are retained in the interface type
toIfaceType (TyVarTy tv) = IfaceTyVar (toIfaceTyVar tv)
toIfaceType (AppTy t1 t2) = IfaceAppTy (toIfaceType t1) (toIfaceType t2)
-toIfaceType (FunTy t1 t2) = IfaceFunTy (toIfaceType t1) (toIfaceType t2)
-toIfaceType (TyConApp tc tys) = IfaceTyConApp (toIfaceTyCon tc) (toIfaceTypes tys)
+toIfaceType (FunTy t1 t2)
+ | isPredTy t1 = IfaceDFunTy (toIfaceType t1) (toIfaceType t2)
+ | otherwise = IfaceFunTy (toIfaceType t1) (toIfaceType t2)
+toIfaceType (TyConApp tc tys) = IfaceTyConApp (toIfaceTyCon tc) (toIfaceTcArgs tc tys)
toIfaceType (LitTy n) = IfaceLitTy (toIfaceTyLit n)
toIfaceType (ForAllTy tv t) = IfaceForAllTy (toIfaceTvBndr tv) (toIfaceType t)
@@ -598,7 +896,11 @@ toIfaceCoVar = occNameFS . getOccName
----------------
toIfaceTyCon :: TyCon -> IfaceTyCon
-toIfaceTyCon = toIfaceTyCon_name . tyConName
+toIfaceTyCon tc
+ | isPromotedDataCon tc = IfacePromotedDataCon tc_name
+ | isPromotedTyCon tc = IfacePromotedTyCon tc_name
+ | otherwise = IfaceTc tc_name
+ where tc_name = tyConName tc
toIfaceTyCon_name :: Name -> IfaceTyCon
toIfaceTyCon_name = IfaceTc
@@ -647,4 +949,3 @@ toIfaceCoercion (AxiomRuleCo co ts cs) = IfaceAxiomRuleCo
(map toIfaceType ts)
(map toIfaceCoercion cs)
\end{code}
-
diff --git a/compiler/iface/LoadIface.lhs b/compiler/iface/LoadIface.lhs
index d7877943261e..2be6e9d4d83d 100644
--- a/compiler/iface/LoadIface.lhs
+++ b/compiler/iface/LoadIface.lhs
@@ -6,6 +6,7 @@
Loading interface files
\begin{code}
+{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module LoadIface (
-- RnM/TcM functions
@@ -352,13 +353,13 @@ wantHiBootFile dflags eps mod from
-- The boot-ness of the requested interface,
-- based on the dependencies in directly-imported modules
where
- this_package = thisPackage dflags == modulePackageId mod
+ this_package = thisPackage dflags == modulePackageKey mod
badSourceImport :: Module -> SDoc
badSourceImport mod
= hang (ptext (sLit "You cannot {-# SOURCE #-} import a module from another package"))
2 (ptext (sLit "but") <+> quotes (ppr mod) <+> ptext (sLit "is from package")
- <+> quotes (ppr (modulePackageId mod)))
+ <+> quotes (ppr (modulePackageKey mod)))
\end{code}
Note [Care with plugin imports]
@@ -391,7 +392,7 @@ compiler expects.
-- the declaration itself, will find the fully-glorious Name
--
-- We handle ATs specially. They are not main declarations, but also not
--- implict things (in particular, adding them to `implicitTyThings' would mess
+-- implicit things (in particular, adding them to `implicitTyThings' would mess
-- things up in the renaming/type checking of source programs).
-----------------------------------------------------
@@ -416,7 +417,6 @@ loadDecl ignore_prags mod (_version, decl)
= do { -- Populate the name cache with final versions of all
-- the names associated with the decl
main_name <- lookupOrig mod (ifName decl)
--- ; traceIf (text "Loading decl for " <> ppr main_name)
-- Typecheck the thing, lazily
-- NB. Firstly, the laziness is there in case we never need the
@@ -445,11 +445,11 @@ loadDecl ignore_prags mod (_version, decl)
-- [ "MkT" -> , "x" -> , ... ]
-- (where the "MkT" is the *Name* associated with MkT, etc.)
--
- -- We do this by mapping the implict_names to the associated
+ -- We do this by mapping the implicit_names to the associated
-- TyThings. By the invariant on ifaceDeclImplicitBndrs and
-- implicitTyThings, we can use getOccName on the implicit
-- TyThings to make this association: each Name's OccName should
- -- be the OccName of exactly one implictTyThing. So the key is
+ -- be the OccName of exactly one implicitTyThing. So the key is
-- to define a "mini-env"
--
-- [ 'MkT' -> , 'x' -> , ... ]
@@ -457,7 +457,7 @@ loadDecl ignore_prags mod (_version, decl)
--
-- However, there is a subtlety: due to how type checking needs
-- to be staged, we can't poke on the forkM'd thunks inside the
- -- implictTyThings while building this mini-env.
+ -- implicitTyThings while building this mini-env.
-- If we poke these thunks too early, two problems could happen:
-- (1) When processing mutually recursive modules across
-- hs-boot boundaries, poking too early will do the
@@ -490,9 +490,11 @@ loadDecl ignore_prags mod (_version, decl)
pprPanic "loadDecl" (ppr main_name <+> ppr n $$ ppr (decl))
; implicit_names <- mapM (lookupOrig mod) (ifaceDeclImplicitBndrs decl)
+
+-- ; traceIf (text "Loading decl for " <> ppr main_name $$ ppr implicit_names)
; return $ (main_name, thing) :
-- uses the invariant that implicit_names and
- -- implictTyThings are bijective
+ -- implicitTyThings are bijective
[(n, lookup n) | n <- implicit_names]
}
where
@@ -571,7 +573,7 @@ findAndReadIface doc_str mod hi_boot_file
(ml_hi_file loc)
-- See Note [Home module load error]
- if thisPackage dflags == modulePackageId mod &&
+ if thisPackage dflags == modulePackageKey mod &&
not (isOneShot (ghcMode dflags))
then return (Failed (homeModError mod loc))
else do r <- read_file file_path
@@ -751,7 +753,7 @@ pprModIface iface
, vcat (map pprUsage (mi_usages iface))
, vcat (map pprIfaceAnnotation (mi_anns iface))
, pprFixities (mi_fixities iface)
- , vcat (map pprIfaceDecl (mi_decls iface))
+ , vcat [ppr ver $$ nest 2 (ppr decl) | (ver,decl) <- mi_decls iface]
, vcat (map ppr (mi_insts iface))
, vcat (map ppr (mi_fam_insts iface))
, vcat (map ppr (mi_rules iface))
@@ -817,10 +819,6 @@ pprDeps (Deps { dep_mods = mods, dep_pkgs = pkgs, dep_orphs = orphs,
ppr_boot True = text "[boot]"
ppr_boot False = empty
-pprIfaceDecl :: (Fingerprint, IfaceDecl) -> SDoc
-pprIfaceDecl (ver, decl)
- = ppr ver $$ nest 2 (ppr decl)
-
pprFixities :: [(OccName, Fixity)] -> SDoc
pprFixities [] = empty
pprFixities fixes = ptext (sLit "fixities") <+> pprWithCommas pprFix fixes
@@ -878,7 +876,9 @@ badIfaceFile file err
hiModuleNameMismatchWarn :: Module -> Module -> MsgDoc
hiModuleNameMismatchWarn requested_mod read_mod =
- withPprStyle defaultUserStyle $
+ -- ToDo: This will fail to have enough qualification when the package IDs
+ -- are the same
+ withPprStyle (mkUserStyle alwaysQualify AllTheWay) $
-- we want the Modules below to be qualified with package names,
-- so reset the PrintUnqualified setting.
hsep [ ptext (sLit "Something is amiss; requested module ")
diff --git a/compiler/iface/MkIface.lhs b/compiler/iface/MkIface.lhs
index 379b39de583a..8a30816855c6 100644
--- a/compiler/iface/MkIface.lhs
+++ b/compiler/iface/MkIface.lhs
@@ -4,6 +4,8 @@
%
\begin{code}
+{-# LANGUAGE CPP, NondecreasingIndentation #-}
+
-- | Module for constructing @ModIface@ values (interface files),
-- writing them to disk and comparing two versions to see if
-- recompilation is required.
@@ -78,6 +80,7 @@ import DataCon
import PatSyn
import Type
import TcType
+import TysPrim ( alphaTyVars )
import InstEnv
import FamInstEnv
import TcRnMonad
@@ -215,12 +218,12 @@ mkDependencies
-- on M.hi-boot, and hence that we should do the hi-boot consistency
-- check.)
- pkgs | th_used = insertList thPackageId (imp_dep_pkgs imports)
+ pkgs | th_used = insertList thPackageKey (imp_dep_pkgs imports)
| otherwise = imp_dep_pkgs imports
-- Set the packages required to be Safe according to Safe Haskell.
-- See Note [RnNames . Tracking Trust Transitively]
- sorted_pkgs = sortBy stablePackageIdCmp pkgs
+ sorted_pkgs = sortBy stablePackageKeyCmp pkgs
trust_pkgs = imp_trust_pkgs imports
dep_pkgs' = map (\x -> (x, x `elem` trust_pkgs)) sorted_pkgs
@@ -556,7 +559,7 @@ addFingerprints hsc_env mb_old_fingerprint iface0 new_decls
-- dependency tree. We only care about orphan modules in the current
-- package, because changes to orphans outside this package will be
-- tracked by the usage on the ABI hash of package modules that we import.
- let orph_mods = filter ((== this_pkg) . modulePackageId)
+ let orph_mods = filter ((== this_pkg) . modulePackageKey)
$ dep_orphs sorted_deps
dep_orphan_hashes <- getOrphanHashes hsc_env orph_mods
@@ -658,7 +661,7 @@ getOrphanHashes hsc_env mods = do
sortDependencies :: Dependencies -> Dependencies
sortDependencies d
= Deps { dep_mods = sortBy (compare `on` (moduleNameFS.fst)) (dep_mods d),
- dep_pkgs = sortBy (stablePackageIdCmp `on` fst) (dep_pkgs d),
+ dep_pkgs = sortBy (stablePackageKeyCmp `on` fst) (dep_pkgs d),
dep_orphs = sortBy stableModuleCmp (dep_orphs d),
dep_finsts = sortBy stableModuleCmp (dep_finsts d) }
\end{code}
@@ -876,6 +879,13 @@ instOrphWarn :: DynFlags -> PrintUnqualified -> ClsInst -> WarnMsg
instOrphWarn dflags unqual inst
= mkWarnMsg dflags (getSrcSpan inst) unqual $
hang (ptext (sLit "Orphan instance:")) 2 (pprInstanceHdr inst)
+ $$ text "To avoid this"
+ $$ nest 4 (vcat possibilities)
+ where
+ possibilities =
+ text "move the instance declaration to the module of the class or of the type, or" :
+ text "wrap the type with a newtype and declare the instance on the new type." :
+ []
ruleOrphWarn :: DynFlags -> PrintUnqualified -> Module -> IfaceRule -> WarnMsg
ruleOrphWarn dflags unqual mod rule
@@ -936,7 +946,7 @@ mk_mod_usage_info :: PackageIfaceTable
-> NameSet
-> [Usage]
mk_mod_usage_info pit hsc_env this_mod direct_imports used_names
- = mapCatMaybes mkUsage usage_mods
+ = mapMaybe mkUsage usage_mods
where
hpt = hsc_HPT hsc_env
dflags = hsc_dflags hsc_env
@@ -979,7 +989,7 @@ mk_mod_usage_info pit hsc_env this_mod direct_imports used_names
-- things in *this* module
= Nothing
- | modulePackageId mod /= this_pkg
+ | modulePackageKey mod /= this_pkg
= Just UsagePackageModule{ usg_mod = mod,
usg_mod_hash = mod_hash,
usg_safe = imp_safe }
@@ -1131,27 +1141,35 @@ recompileRequired _ = True
-- first element is a bool saying if we should recompile the object file
-- and the second is maybe the interface file, where Nothng means to
-- rebuild the interface file not use the exisitng one.
-checkOldIface :: HscEnv
- -> ModSummary
- -> SourceModified
- -> Maybe ModIface -- Old interface from compilation manager, if any
- -> IO (RecompileRequired, Maybe ModIface)
+checkOldIface
+ :: HscEnv
+ -> ModSummary
+ -> SourceModified
+ -> Maybe ModIface -- Old interface from compilation manager, if any
+ -> IO (RecompileRequired, Maybe ModIface)
checkOldIface hsc_env mod_summary source_modified maybe_iface
= do let dflags = hsc_dflags hsc_env
showPass dflags $
- "Checking old interface for " ++ (showPpr dflags $ ms_mod mod_summary)
+ "Checking old interface for " ++
+ (showPpr dflags $ ms_mod mod_summary)
initIfaceCheck hsc_env $
check_old_iface hsc_env mod_summary source_modified maybe_iface
-check_old_iface :: HscEnv -> ModSummary -> SourceModified -> Maybe ModIface
- -> IfG (RecompileRequired, Maybe ModIface)
+check_old_iface
+ :: HscEnv
+ -> ModSummary
+ -> SourceModified
+ -> Maybe ModIface
+ -> IfG (RecompileRequired, Maybe ModIface)
+
check_old_iface hsc_env mod_summary src_modified maybe_iface
= let dflags = hsc_dflags hsc_env
getIface =
case maybe_iface of
Just _ -> do
- traceIf (text "We already have the old interface for" <+> ppr (ms_mod mod_summary))
+ traceIf (text "We already have the old interface for" <+>
+ ppr (ms_mod mod_summary))
return maybe_iface
Nothing -> loadIface
@@ -1300,7 +1318,7 @@ checkDependencies hsc_env summary iface
return (RecompBecause reason)
else
return UpToDate
- where pkg = modulePackageId mod
+ where pkg = modulePackageKey mod
_otherwise -> return (RecompBecause reason)
needInterface :: Module -> (ModIface -> IfG RecompileRequired)
@@ -1329,7 +1347,7 @@ needInterface mod continue
-- | Given the usage information extracted from the old
-- M.hi file for the module being compiled, figure out
-- whether M needs to be recompiled.
-checkModUsage :: PackageId -> Usage -> IfG RecompileRequired
+checkModUsage :: PackageKey -> Usage -> IfG RecompileRequired
checkModUsage _this_pkg UsagePackageModule{
usg_mod = mod,
usg_mod_hash = old_mod_hash }
@@ -1458,10 +1476,10 @@ checkList (check:checks) = do recompile <- check
\begin{code}
tyThingToIfaceDecl :: TyThing -> IfaceDecl
tyThingToIfaceDecl (AnId id) = idToIfaceDecl id
-tyThingToIfaceDecl (ATyCon tycon) = tyConToIfaceDecl emptyTidyEnv tycon
+tyThingToIfaceDecl (ATyCon tycon) = snd (tyConToIfaceDecl emptyTidyEnv tycon)
tyThingToIfaceDecl (ACoAxiom ax) = coAxiomToIfaceDecl ax
tyThingToIfaceDecl (AConLike cl) = case cl of
- RealDataCon dc -> pprPanic "toIfaceDecl" (ppr dc) -- Should be trimmed out earlier
+ RealDataCon dc -> dataConToIfaceDecl dc -- for ppr purposes only
PatSynCon ps -> patSynToIfaceDecl ps
--------------------------
@@ -1476,29 +1494,36 @@ idToIfaceDecl id
ifIdDetails = toIfaceIdDetails (idDetails id),
ifIdInfo = toIfaceIdInfo (idInfo id) }
+--------------------------
+dataConToIfaceDecl :: DataCon -> IfaceDecl
+dataConToIfaceDecl dataCon
+ = IfaceId { ifName = getOccName dataCon,
+ ifType = toIfaceType (dataConUserType dataCon),
+ ifIdDetails = IfVanillaId,
+ ifIdInfo = NoInfo }
+
--------------------------
patSynToIfaceDecl :: PatSyn -> IfaceDecl
patSynToIfaceDecl ps
= IfacePatSyn { ifName = getOccName . getName $ ps
- , ifPatHasWrapper = isJust $ patSynWrapper ps
+ , ifPatMatcher = matcher
+ , ifPatWrapper = wrapper
, ifPatIsInfix = patSynIsInfix ps
, ifPatUnivTvs = toIfaceTvBndrs univ_tvs'
, ifPatExTvs = toIfaceTvBndrs ex_tvs'
, ifPatProvCtxt = tidyToIfaceContext env2 prov_theta
, ifPatReqCtxt = tidyToIfaceContext env2 req_theta
- , ifPatArgs = map toIfaceArg args
+ , ifPatArgs = map (tidyToIfaceType env2) args
, ifPatTy = tidyToIfaceType env2 rhs_ty
}
where
- toIfaceArg var = (occNameFS (getOccName var),
- tidyToIfaceType env2 (varType var))
-
- (univ_tvs, ex_tvs, (prov_theta, req_theta)) = patSynSig ps
- args = patSynArgs ps
- rhs_ty = patSynType ps
+ (univ_tvs, ex_tvs, prov_theta, req_theta, args, rhs_ty) = patSynSig ps
(env1, univ_tvs') = tidyTyVarBndrs emptyTidyEnv univ_tvs
(env2, ex_tvs') = tidyTyVarBndrs env1 ex_tvs
+ matcher = idName (patSynMatcher ps)
+ wrapper = fmap idName (patSynWrapper ps)
+
--------------------------
coAxiomToIfaceDecl :: CoAxiom br -> IfaceDecl
@@ -1509,19 +1534,19 @@ coAxiomToIfaceDecl ax@(CoAxiom { co_ax_tc = tycon, co_ax_branches = branches
= IfaceAxiom { ifName = name
, ifTyCon = toIfaceTyCon tycon
, ifRole = role
- , ifAxBranches = brListMap (coAxBranchToIfaceBranch
- emptyTidyEnv
- (brListMap coAxBranchLHS branches)) branches }
+ , ifAxBranches = brListMap (coAxBranchToIfaceBranch tycon
+ (brListMap coAxBranchLHS branches))
+ branches }
where
name = getOccName ax
-- 2nd parameter is the list of branch LHSs, for conversion from incompatible branches
-- to incompatible indices
--- See [Storing compatibility] in CoAxiom
-coAxBranchToIfaceBranch :: TidyEnv -> [[Type]] -> CoAxBranch -> IfaceAxBranch
-coAxBranchToIfaceBranch env0 lhs_s
+-- See Note [Storing compatibility] in CoAxiom
+coAxBranchToIfaceBranch :: TyCon -> [[Type]] -> CoAxBranch -> IfaceAxBranch
+coAxBranchToIfaceBranch tc lhs_s
branch@(CoAxBranch { cab_incomps = incomps })
- = (coAxBranchToIfaceBranch' env0 branch) { ifaxbIncomps = iface_incomps }
+ = (coAxBranchToIfaceBranch' tc branch) { ifaxbIncomps = iface_incomps }
where
iface_incomps = map (expectJust "iface_incomps"
. (flip findIndex lhs_s
@@ -1529,63 +1554,91 @@ coAxBranchToIfaceBranch env0 lhs_s
. coAxBranchLHS) incomps
-- use this one for standalone branches without incompatibles
-coAxBranchToIfaceBranch' :: TidyEnv -> CoAxBranch -> IfaceAxBranch
-coAxBranchToIfaceBranch' env0
- (CoAxBranch { cab_tvs = tvs, cab_lhs = lhs
- , cab_roles = roles, cab_rhs = rhs })
+coAxBranchToIfaceBranch' :: TyCon -> CoAxBranch -> IfaceAxBranch
+coAxBranchToIfaceBranch' tc (CoAxBranch { cab_tvs = tvs, cab_lhs = lhs
+ , cab_roles = roles, cab_rhs = rhs })
= IfaceAxBranch { ifaxbTyVars = toIfaceTvBndrs tv_bndrs
- , ifaxbLHS = map (tidyToIfaceType env1) lhs
+ , ifaxbLHS = tidyToIfaceTcArgs env1 tc lhs
, ifaxbRoles = roles
, ifaxbRHS = tidyToIfaceType env1 rhs
, ifaxbIncomps = [] }
where
- (env1, tv_bndrs) = tidyTyClTyVarBndrs env0 tvs
+ (env1, tv_bndrs) = tidyTyClTyVarBndrs emptyTidyEnv tvs
-- Don't re-bind in-scope tyvars
-- See Note [CoAxBranch type variables] in CoAxiom
-----------------
-tyConToIfaceDecl :: TidyEnv -> TyCon -> IfaceDecl
+tyConToIfaceDecl :: TidyEnv -> TyCon -> (TidyEnv, IfaceDecl)
-- We *do* tidy TyCons, because they are not (and cannot
-- conveniently be) built in tidy form
+-- The returned TidyEnv is the one after tidying the tyConTyVars
tyConToIfaceDecl env tycon
| Just clas <- tyConClass_maybe tycon
= classToIfaceDecl env clas
| Just syn_rhs <- synTyConRhs_maybe tycon
- = IfaceSyn { ifName = getOccName tycon,
- ifTyVars = toIfaceTvBndrs tyvars,
- ifRoles = tyConRoles tycon,
- ifSynRhs = to_ifsyn_rhs syn_rhs,
- ifSynKind = tidyToIfaceType env1 (synTyConResKind tycon) }
+ = ( tc_env1
+ , IfaceSyn { ifName = getOccName tycon,
+ ifTyVars = if_tc_tyvars,
+ ifRoles = tyConRoles tycon,
+ ifSynRhs = to_ifsyn_rhs syn_rhs,
+ ifSynKind = tidyToIfaceType tc_env1 (synTyConResKind tycon) })
| isAlgTyCon tycon
- = IfaceData { ifName = getOccName tycon,
- ifCType = tyConCType tycon,
- ifTyVars = toIfaceTvBndrs tyvars,
- ifRoles = tyConRoles tycon,
- ifCtxt = tidyToIfaceContext env1 (tyConStupidTheta tycon),
- ifCons = ifaceConDecls (algTyConRhs tycon),
- ifRec = boolToRecFlag (isRecursiveTyCon tycon),
- ifGadtSyntax = isGadtSyntaxTyCon tycon,
- ifPromotable = isJust (promotableTyCon_maybe tycon),
- ifAxiom = fmap coAxiomName (tyConFamilyCoercion_maybe tycon) }
+ = ( tc_env1
+ , IfaceData { ifName = getOccName tycon,
+ ifCType = tyConCType tycon,
+ ifTyVars = if_tc_tyvars,
+ ifRoles = tyConRoles tycon,
+ ifCtxt = tidyToIfaceContext tc_env1 (tyConStupidTheta tycon),
+ ifCons = ifaceConDecls (algTyConRhs tycon),
+ ifRec = boolToRecFlag (isRecursiveTyCon tycon),
+ ifGadtSyntax = isGadtSyntaxTyCon tycon,
+ ifPromotable = isJust (promotableTyCon_maybe tycon),
+ ifParent = parent })
| isForeignTyCon tycon
- = IfaceForeign { ifName = getOccName tycon,
- ifExtName = tyConExtName tycon }
-
- | otherwise = pprPanic "toIfaceDecl" (ppr tycon)
+ = (env, IfaceForeign { ifName = getOccName tycon,
+ ifExtName = tyConExtName tycon })
+
+ | otherwise -- FunTyCon, PrimTyCon, promoted TyCon/DataCon
+ -- For pretty printing purposes only.
+ = ( env
+ , IfaceData { ifName = getOccName tycon,
+ ifCType = Nothing,
+ ifTyVars = funAndPrimTyVars,
+ ifRoles = tyConRoles tycon,
+ ifCtxt = [],
+ ifCons = IfDataTyCon [],
+ ifRec = boolToRecFlag False,
+ ifGadtSyntax = False,
+ ifPromotable = False,
+ ifParent = IfNoParent })
where
- (env1, tyvars) = tidyTyClTyVarBndrs env (tyConTyVars tycon)
+ (tc_env1, tc_tyvars) = tidyTyClTyVarBndrs env (tyConTyVars tycon)
+ if_tc_tyvars = toIfaceTvBndrs tc_tyvars
+
+ funAndPrimTyVars = toIfaceTvBndrs $ take (tyConArity tycon) alphaTyVars
+
+ parent = case tyConFamInstSig_maybe tycon of
+ Just (tc, ty, ax) -> IfDataInstance (coAxiomName ax)
+ (toIfaceTyCon tc)
+ (tidyToIfaceTcArgs tc_env1 tc ty)
+ Nothing -> IfNoParent
+
+ to_ifsyn_rhs OpenSynFamilyTyCon = IfaceOpenSynFamilyTyCon
+ to_ifsyn_rhs (ClosedSynFamilyTyCon ax) = IfaceClosedSynFamilyTyCon axn ibr
+ where defs = fromBranchList $ coAxiomBranches ax
+ ibr = map (coAxBranchToIfaceBranch' tycon) defs
+ axn = coAxiomName ax
+ to_ifsyn_rhs AbstractClosedSynFamilyTyCon
+ = IfaceAbstractClosedSynFamilyTyCon
- to_ifsyn_rhs OpenSynFamilyTyCon = IfaceOpenSynFamilyTyCon
- to_ifsyn_rhs (ClosedSynFamilyTyCon ax)
- = IfaceClosedSynFamilyTyCon (coAxiomName ax)
- to_ifsyn_rhs AbstractClosedSynFamilyTyCon = IfaceAbstractClosedSynFamilyTyCon
to_ifsyn_rhs (SynonymTyCon ty)
- = IfaceSynonymTyCon (tidyToIfaceType env1 ty)
+ = IfaceSynonymTyCon (tidyToIfaceType tc_env1 ty)
- to_ifsyn_rhs (BuiltInSynFamTyCon {}) = pprPanic "toIfaceDecl: BuiltInFamTyCon" (ppr tycon)
+ to_ifsyn_rhs (BuiltInSynFamTyCon {})
+ = IfaceBuiltInSynFamTyCon
ifaceConDecls (NewTyCon { data_con = con }) = IfNewTyCon (ifaceConDecl con)
@@ -1601,23 +1654,28 @@ tyConToIfaceDecl env tycon
= IfCon { ifConOcc = getOccName (dataConName data_con),
ifConInfix = dataConIsInfix data_con,
ifConWrapper = isJust (dataConWrapId_maybe data_con),
- ifConUnivTvs = toIfaceTvBndrs univ_tvs',
ifConExTvs = toIfaceTvBndrs ex_tvs',
- ifConEqSpec = to_eq_spec eq_spec,
- ifConCtxt = tidyToIfaceContext env2 theta,
- ifConArgTys = map (tidyToIfaceType env2) arg_tys,
+ ifConEqSpec = map to_eq_spec eq_spec,
+ ifConCtxt = tidyToIfaceContext con_env2 theta,
+ ifConArgTys = map (tidyToIfaceType con_env2) arg_tys,
ifConFields = map getOccName
(dataConFieldLabels data_con),
- ifConStricts = map (toIfaceBang env2) (dataConRepBangs data_con) }
+ ifConStricts = map (toIfaceBang con_env2) (dataConRepBangs data_con) }
where
(univ_tvs, ex_tvs, eq_spec, theta, arg_tys, _) = dataConFullSig data_con
- -- Start with 'emptyTidyEnv' not 'env1', because the type of the
- -- data constructor is fully standalone
- (env1, univ_tvs') = tidyTyVarBndrs emptyTidyEnv univ_tvs
- (env2, ex_tvs') = tidyTyVarBndrs env1 ex_tvs
- to_eq_spec spec = [ (getOccName (tidyTyVar env2 tv), tidyToIfaceType env2 ty)
- | (tv,ty) <- spec]
+ -- Tidy the univ_tvs of the data constructor to be identical
+ -- to the tyConTyVars of the type constructor. This means
+ -- (a) we don't need to redundantly put them into the interface file
+ -- (b) when pretty-printing an Iface data declaration in H98-style syntax,
+ -- we know that the type variables will line up
+ -- The latter (b) is important because we pretty-print type construtors
+ -- by converting to IfaceSyn and pretty-printing that
+ con_env1 = (fst tc_env1, mkVarEnv (zipEqual "ifaceConDecl" univ_tvs tc_tyvars))
+ -- A bit grimy, perhaps, but it's simple!
+
+ (con_env2, ex_tvs') = tidyTyVarBndrs con_env1 ex_tvs
+ to_eq_spec (tv,ty) = (toIfaceTyVar (tidyTyVar con_env2 tv), tidyToIfaceType con_env2 ty)
toIfaceBang :: TidyEnv -> HsBang -> IfaceBang
toIfaceBang _ HsNoBang = IfNoBang
@@ -1626,17 +1684,18 @@ toIfaceBang env (HsUnpack (Just co)) = IfUnpackCo (toIfaceCoercion (tidyCo env c
toIfaceBang _ HsStrict = IfStrict
toIfaceBang _ (HsUserBang {}) = panic "toIfaceBang"
-classToIfaceDecl :: TidyEnv -> Class -> IfaceDecl
+classToIfaceDecl :: TidyEnv -> Class -> (TidyEnv, IfaceDecl)
classToIfaceDecl env clas
- = IfaceClass { ifCtxt = tidyToIfaceContext env1 sc_theta,
- ifName = getOccName (classTyCon clas),
- ifTyVars = toIfaceTvBndrs clas_tyvars',
- ifRoles = tyConRoles (classTyCon clas),
- ifFDs = map toIfaceFD clas_fds,
- ifATs = map toIfaceAT clas_ats,
- ifSigs = map toIfaceClassOp op_stuff,
- ifMinDef = fmap getOccName (classMinimalDef clas),
- ifRec = boolToRecFlag (isRecursiveTyCon tycon) }
+ = ( env1
+ , IfaceClass { ifCtxt = tidyToIfaceContext env1 sc_theta,
+ ifName = getOccName (classTyCon clas),
+ ifTyVars = toIfaceTvBndrs clas_tyvars',
+ ifRoles = tyConRoles (classTyCon clas),
+ ifFDs = map toIfaceFD clas_fds,
+ ifATs = map toIfaceAT clas_ats,
+ ifSigs = map toIfaceClassOp op_stuff,
+ ifMinDef = fmap getFS (classMinimalDef clas),
+ ifRec = boolToRecFlag (isRecursiveTyCon tycon) })
where
(clas_tyvars, clas_fds, sc_theta, _, clas_ats, op_stuff)
= classExtraBigSig clas
@@ -1645,8 +1704,10 @@ classToIfaceDecl env clas
(env1, clas_tyvars') = tidyTyVarBndrs env clas_tyvars
toIfaceAT :: ClassATItem -> IfaceAT
- toIfaceAT (tc, defs)
- = IfaceAT (tyConToIfaceDecl env1 tc) (map (coAxBranchToIfaceBranch' env1) defs)
+ toIfaceAT (ATI tc def)
+ = IfaceAT if_decl (fmap (tidyToIfaceType env2) def)
+ where
+ (env2, if_decl) = tyConToIfaceDecl env1 tc
toIfaceClassOp (sel_id, def_meth)
= ASSERT(sel_tyvars == clas_tyvars)
@@ -1672,6 +1733,9 @@ classToIfaceDecl env clas
tidyToIfaceType :: TidyEnv -> Type -> IfaceType
tidyToIfaceType env ty = toIfaceType (tidyType env ty)
+tidyToIfaceTcArgs :: TidyEnv -> TyCon -> [Type] -> IfaceTcArgs
+tidyToIfaceTcArgs env tc tys = toIfaceTcArgs tc (tidyTypes env tys)
+
tidyToIfaceContext :: TidyEnv -> ThetaType -> IfaceContext
tidyToIfaceContext env theta = map (tidyToIfaceType env) theta
@@ -1904,7 +1968,9 @@ toIfaceExpr (Tick t e)
toIfaceTickish :: Tickish Id -> Maybe IfaceTickish
toIfaceTickish (ProfNote cc tick push) = Just (IfaceSCC cc tick push)
toIfaceTickish (HpcTick modl ix) = Just (IfaceHpcTick modl ix)
-toIfaceTickish (Breakpoint {}) = Nothing
+toIfaceTickish (SourceNote src names) = Just (IfaceSource src names)
+toIfaceTickish (CoreNote {}) = Nothing
+toIfaceTickish (Breakpoint {}) = Nothing
-- Ignore breakpoints, since they are relevant only to GHCi, and
-- should not be serialised (Trac #8333)
diff --git a/compiler/iface/TcIface.lhs b/compiler/iface/TcIface.lhs
index 20adfe5896be..4d94ab8d7b01 100644
--- a/compiler/iface/TcIface.lhs
+++ b/compiler/iface/TcIface.lhs
@@ -6,14 +6,15 @@
Type checking of type signatures in interface files
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcIface (
tcLookupImported_maybe,
importDecl, checkWiredInTyCon, tcHiBootIface, typecheckIface,
tcIfaceDecl, tcIfaceInst, tcIfaceFamInst, tcIfaceRules,
tcIfaceVectInfo, tcIfaceAnnotations,
tcIfaceExpr, -- Desired by HERMIT (Trac #7683)
- tcIfaceGlobal,
- tcExtCoreBindings
+ tcIfaceGlobal
) where
#include "HsVersions.h"
@@ -343,26 +344,34 @@ tcHiBootIface hsc_src mod
else do
-- OK, so we're in one-shot mode.
- -- In that case, we're read all the direct imports by now,
- -- so eps_is_boot will record if any of our imports mention us by
- -- way of hi-boot file
- { eps <- getEps
- ; case lookupUFM (eps_is_boot eps) (moduleName mod) of {
- Nothing -> return emptyModDetails ; -- The typical case
+ -- Re #9245, we always check if there is an hi-boot interface
+ -- to check consistency against, rather than just when we notice
+ -- that an hi-boot is necessary due to a circular import.
+ { read_result <- findAndReadIface
+ need mod
+ True -- Hi-boot file
- Just (_, False) -> failWithTc moduleLoop ;
+ ; case read_result of {
+ Succeeded (iface, _path) -> typecheckIface iface ;
+ Failed err ->
+
+ -- There was no hi-boot file. But if there is circularity in
+ -- the module graph, there really should have been one.
+ -- Since we've read all the direct imports by now,
+ -- eps_is_boot will record if any of our imports mention the
+ -- current module, which either means a module loop (not
+ -- a SOURCE import) or that our hi-boot file has mysteriously
+ -- disappeared.
+ do { eps <- getEps
+ ; case lookupUFM (eps_is_boot eps) (moduleName mod) of
+ Nothing -> return emptyModDetails -- The typical case
+
+ Just (_, False) -> failWithTc moduleLoop
-- Someone below us imported us!
-- This is a loop with no hi-boot in the way
- Just (_mod, True) -> -- There's a hi-boot interface below us
-
- do { read_result <- findAndReadIface
- need mod
- True -- Hi-boot file
-
- ; case read_result of
- Failed err -> failWithTc (elaborate err)
- Succeeded (iface, _path) -> typecheckIface iface
+ Just (_mod, True) -> failWithTc (elaborate err)
+ -- The hi-boot file has mysteriously disappeared.
}}}}
where
need = ptext (sLit "Need the hi-boot interface for") <+> ppr mod
@@ -451,41 +460,26 @@ tc_iface_decl parent _ (IfaceData {ifName = occ_name,
ifCtxt = ctxt, ifGadtSyntax = gadt_syn,
ifCons = rdr_cons,
ifRec = is_rec, ifPromotable = is_prom,
- ifAxiom = mb_axiom_name })
+ ifParent = mb_parent })
= bindIfaceTyVars_AT tv_bndrs $ \ tyvars -> do
{ tc_name <- lookupIfaceTop occ_name
; tycon <- fixM $ \ tycon -> do
{ stupid_theta <- tcIfaceCtxt ctxt
- ; parent' <- tc_parent tyvars mb_axiom_name
+ ; parent' <- tc_parent mb_parent
; cons <- tcIfaceDataCons tc_name tycon tyvars rdr_cons
; return (buildAlgTyCon tc_name tyvars roles cType stupid_theta
cons is_rec is_prom gadt_syn parent') }
; traceIf (text "tcIfaceDecl4" <+> ppr tycon)
; return (ATyCon tycon) }
where
- tc_parent :: [TyVar] -> Maybe Name -> IfL TyConParent
- tc_parent _ Nothing = return parent
- tc_parent tyvars (Just ax_name)
+ tc_parent :: IfaceTyConParent -> IfL TyConParent
+ tc_parent IfNoParent = return parent
+ tc_parent (IfDataInstance ax_name _ arg_tys)
= ASSERT( isNoParent parent )
do { ax <- tcIfaceCoAxiom ax_name
- ; let fam_tc = coAxiomTyCon ax
+ ; let fam_tc = coAxiomTyCon ax
ax_unbr = toUnbranchedAxiom ax
- -- data families don't have branches:
- branch = coAxiomSingleBranch ax_unbr
- ax_tvs = coAxBranchTyVars branch
- ax_lhs = coAxBranchLHS branch
- tycon_tys = mkTyVarTys tyvars
- subst = mkTopTvSubst (ax_tvs `zip` tycon_tys)
- -- The subst matches the tyvar of the TyCon
- -- with those from the CoAxiom. They aren't
- -- necessarily the same, since the two may be
- -- gotten from separate interface-file declarations
- -- NB: ax_tvs may be shorter because of eta-reduction
- -- See Note [Eta reduction for data family axioms] in TcInstDcls
- lhs_tys = substTys subst ax_lhs `chkAppend`
- dropList ax_tvs tycon_tys
- -- The 'lhs_tys' should be 1-1 with the 'tyvars'
- -- but ax_tvs maybe shorter because of eta-reduction
+ ; lhs_tys <- tcIfaceTcArgs arg_tys
; return (FamInstTyCon ax_unbr fam_tc lhs_tys) }
tc_iface_decl parent _ (IfaceSyn {ifName = occ_name, ifTyVars = tv_bndrs,
@@ -502,12 +496,14 @@ tc_iface_decl parent _ (IfaceSyn {ifName = occ_name, ifTyVars = tv_bndrs,
where
mk_doc n = ptext (sLit "Type syonym") <+> ppr n
tc_syn_rhs IfaceOpenSynFamilyTyCon = return OpenSynFamilyTyCon
- tc_syn_rhs (IfaceClosedSynFamilyTyCon ax_name)
+ tc_syn_rhs (IfaceClosedSynFamilyTyCon ax_name _)
= do { ax <- tcIfaceCoAxiom ax_name
; return (ClosedSynFamilyTyCon ax) }
tc_syn_rhs IfaceAbstractClosedSynFamilyTyCon = return AbstractClosedSynFamilyTyCon
tc_syn_rhs (IfaceSynonymTyCon ty) = do { rhs_ty <- tcIfaceType ty
; return (SynonymTyCon rhs_ty) }
+ tc_syn_rhs IfaceBuiltInSynFamTyCon = pprPanic "tc_iface_decl"
+ (ptext (sLit "IfaceBuiltInSynFamTyCon in interface file"))
tc_iface_decl _parent ignore_prags
(IfaceClass {ifCtxt = rdr_ctxt, ifName = tc_occ,
@@ -524,11 +520,11 @@ tc_iface_decl _parent ignore_prags
; sigs <- mapM tc_sig rdr_sigs
; fds <- mapM tc_fd rdr_fds
; traceIf (text "tc-iface-class3" <+> ppr tc_occ)
- ; mindef <- traverse lookupIfaceTop mindef_occ
+ ; mindef <- traverse (lookupIfaceTop . mkVarOccFS) mindef_occ
; cls <- fixM $ \ cls -> do
{ ats <- mapM (tc_at cls) rdr_ats
; traceIf (text "tc-iface-class4" <+> ppr tc_occ)
- ; buildClass ignore_prags tc_name tyvars roles ctxt fds ats sigs mindef tc_isrec }
+ ; buildClass tc_name tyvars roles ctxt fds ats sigs mindef tc_isrec }
; return (ATyCon (classTyCon cls)) }
where
tc_sc pred = forkM (mk_sc_doc pred) (tcIfaceType pred)
@@ -548,13 +544,18 @@ tc_iface_decl _parent ignore_prags
-- it mentions unless it's necessary to do so
; return (op_name, dm, op_ty) }
- tc_at cls (IfaceAT tc_decl defs_decls)
+ tc_at cls (IfaceAT tc_decl if_def)
= do ATyCon tc <- tc_iface_decl (AssocFamilyTyCon cls) ignore_prags tc_decl
- defs <- forkM (mk_at_doc tc) (tc_ax_branches tc defs_decls)
+ mb_def <- case if_def of
+ Nothing -> return Nothing
+ Just def -> forkM (mk_at_doc tc) $
+ extendIfaceTyVarEnv (tyConTyVars tc) $
+ do { tc_def <- tcIfaceType def
+ ; return (Just tc_def) }
-- Must be done lazily in case the RHS of the defaults mention
-- the type constructor being defined here
-- e.g. type AT a; type AT b = AT [b] Trac #8002
- return (tc, defs)
+ return (ATI tc mb_def)
mk_sc_doc pred = ptext (sLit "Superclass") <+> ppr pred
mk_at_doc tc = ptext (sLit "Associated type") <+> ppr tc
@@ -573,9 +574,8 @@ tc_iface_decl _ _ (IfaceAxiom { ifName = ax_occ, ifTyCon = tc
, ifAxBranches = branches, ifRole = role })
= do { tc_name <- lookupIfaceTop ax_occ
; tc_tycon <- tcIfaceTyCon tc
- ; tc_branches <- tc_ax_branches tc_tycon branches
- ; let axiom = computeAxiomIncomps $
- CoAxiom { co_ax_unique = nameUnique tc_name
+ ; tc_branches <- tc_ax_branches branches
+ ; let axiom = CoAxiom { co_ax_unique = nameUnique tc_name
, co_ax_name = tc_name
, co_ax_tc = tc_tycon
, co_ax_role = role
@@ -584,7 +584,8 @@ tc_iface_decl _ _ (IfaceAxiom { ifName = ax_occ, ifTyCon = tc
; return (ACoAxiom axiom) }
tc_iface_decl _ _ (IfacePatSyn{ ifName = occ_name
- , ifPatHasWrapper = has_wrapper
+ , ifPatMatcher = matcher_name
+ , ifPatWrapper = wrapper_name
, ifPatIsInfix = is_infix
, ifPatUnivTvs = univ_tvs
, ifPatExTvs = ex_tvs
@@ -594,31 +595,35 @@ tc_iface_decl _ _ (IfacePatSyn{ ifName = occ_name
, ifPatTy = pat_ty })
= do { name <- lookupIfaceTop occ_name
; traceIf (ptext (sLit "tc_iface_decl") <+> ppr name)
+ ; matcher <- tcExt "Matcher" matcher_name
+ ; wrapper <- case wrapper_name of
+ Nothing -> return Nothing
+ Just wn -> do { wid <- tcExt "Wrapper" wn
+ ; return (Just wid) }
; bindIfaceTyVars univ_tvs $ \univ_tvs -> do
{ bindIfaceTyVars ex_tvs $ \ex_tvs -> do
- { bindIfaceIdVars args $ \args -> do
- { ~(prov_theta, req_theta, pat_ty) <- forkM (mk_doc name) $
+ { patsyn <- forkM (mk_doc name) $
do { prov_theta <- tcIfaceCtxt prov_ctxt
; req_theta <- tcIfaceCtxt req_ctxt
; pat_ty <- tcIfaceType pat_ty
- ; return (prov_theta, req_theta, pat_ty) }
- ; bindIfaceTyVar (fsLit "r", toIfaceKind liftedTypeKind) $ \tv -> do
- { patsyn <- buildPatSyn name is_infix has_wrapper args univ_tvs ex_tvs prov_theta req_theta pat_ty tv
- ; return (AConLike (PatSynCon patsyn)) }}}}}
+ ; arg_tys <- mapM tcIfaceType args
+ ; return $ buildPatSyn name is_infix matcher wrapper
+ arg_tys univ_tvs ex_tvs prov_theta req_theta pat_ty }
+ ; return $ AConLike . PatSynCon $ patsyn }}}
where
mk_doc n = ptext (sLit "Pattern synonym") <+> ppr n
+ tcExt s name = forkM (ptext (sLit s) <+> ppr name) $ tcIfaceExtId name
+tc_ax_branches :: [IfaceAxBranch] -> IfL [CoAxBranch]
+tc_ax_branches if_branches = foldlM tc_ax_branch [] if_branches
-tc_ax_branches :: TyCon -> [IfaceAxBranch] -> IfL [CoAxBranch]
-tc_ax_branches tc if_branches = foldlM (tc_ax_branch (tyConKind tc)) [] if_branches
-
-tc_ax_branch :: Kind -> [CoAxBranch] -> IfaceAxBranch -> IfL [CoAxBranch]
-tc_ax_branch tc_kind prev_branches
+tc_ax_branch :: [CoAxBranch] -> IfaceAxBranch -> IfL [CoAxBranch]
+tc_ax_branch prev_branches
(IfaceAxBranch { ifaxbTyVars = tv_bndrs, ifaxbLHS = lhs, ifaxbRHS = rhs
, ifaxbRoles = roles, ifaxbIncomps = incomps })
= bindIfaceTyVars_AT tv_bndrs $ \ tvs -> do
-- The _AT variant is needed here; see Note [CoAxBranch type variables] in CoAxiom
- { tc_lhs <- tcIfaceTcArgs tc_kind lhs -- See Note [Checking IfaceTypes vs IfaceKinds]
+ { tc_lhs <- tcIfaceTcArgs lhs -- See Note [Checking IfaceTypes vs IfaceKinds]
; tc_rhs <- tcIfaceType rhs
; let br = CoAxBranch { cab_loc = noSrcSpan
, cab_tvs = tvs
@@ -629,7 +634,7 @@ tc_ax_branch tc_kind prev_branches
; return (prev_branches ++ [br]) }
tcIfaceDataCons :: Name -> TyCon -> [TyVar] -> IfaceConDecls -> IfL AlgTyConRhs
-tcIfaceDataCons tycon_name tycon _ if_cons
+tcIfaceDataCons tycon_name tycon tc_tyvars if_cons
= case if_cons of
IfAbstractTyCon dis -> return (AbstractTyCon dis)
IfDataFamTyCon -> return DataFamilyTyCon
@@ -639,11 +644,12 @@ tcIfaceDataCons tycon_name tycon _ if_cons
; mkNewTyConRhs tycon_name tycon data_con }
where
tc_con_decl (IfCon { ifConInfix = is_infix,
- ifConUnivTvs = univ_tvs, ifConExTvs = ex_tvs,
+ ifConExTvs = ex_tvs,
ifConOcc = occ, ifConCtxt = ctxt, ifConEqSpec = spec,
ifConArgTys = args, ifConFields = field_lbls,
ifConStricts = if_stricts})
- = bindIfaceTyVars univ_tvs $ \ univ_tyvars -> do
+ = -- Universally-quantified tyvars are shared with
+ -- parent TyCon, and are alrady in scope
bindIfaceTyVars ex_tvs $ \ ex_tyvars -> do
{ traceIf (text "Start interface-file tc_con_decl" <+> ppr occ)
; name <- lookupIfaceTop occ
@@ -665,12 +671,12 @@ tcIfaceDataCons tycon_name tycon _ if_cons
-- Remember, tycon is the representation tycon
; let orig_res_ty = mkFamilyTyConApp tycon
- (substTyVars (mkTopTvSubst eq_spec) univ_tyvars)
+ (substTyVars (mkTopTvSubst eq_spec) tc_tyvars)
; con <- buildDataCon (pprPanic "tcIfaceDataCons: FamInstEnvs" (ppr name))
name is_infix
stricts lbl_names
- univ_tyvars ex_tyvars
+ tc_tyvars ex_tyvars
eq_spec theta
arg_tys orig_res_ty tycon
; traceIf (text "Done interface-file tc_con_decl" <+> ppr name)
@@ -683,11 +689,11 @@ tcIfaceDataCons tycon_name tycon _ if_cons
tc_strict (IfUnpackCo if_co) = do { co <- tcIfaceCo if_co
; return (HsUnpack (Just co)) }
-tcIfaceEqSpec :: [(OccName, IfaceType)] -> IfL [(TyVar, Type)]
+tcIfaceEqSpec :: IfaceEqSpec -> IfL [(TyVar, Type)]
tcIfaceEqSpec spec
= mapM do_item spec
where
- do_item (occ, if_ty) = do { tv <- tcIfaceTyVar (occNameFS occ)
+ do_item (occ, if_ty) = do { tv <- tcIfaceTyVar occ
; ty <- tcIfaceType if_ty
; return (tv,ty) }
\end{code}
@@ -958,25 +964,38 @@ tcIfaceType :: IfaceType -> IfL Type
tcIfaceType (IfaceTyVar n) = do { tv <- tcIfaceTyVar n; return (TyVarTy tv) }
tcIfaceType (IfaceAppTy t1 t2) = do { t1' <- tcIfaceType t1; t2' <- tcIfaceType t2; return (AppTy t1' t2') }
tcIfaceType (IfaceLitTy l) = do { l1 <- tcIfaceTyLit l; return (LitTy l1) }
-tcIfaceType (IfaceFunTy t1 t2) = do { t1' <- tcIfaceType t1; t2' <- tcIfaceType t2; return (FunTy t1' t2') }
+tcIfaceType (IfaceFunTy t1 t2) = tcIfaceTypeFun t1 t2
+tcIfaceType (IfaceDFunTy t1 t2) = tcIfaceTypeFun t1 t2
tcIfaceType (IfaceTyConApp tc tks) = do { tc' <- tcIfaceTyCon tc
- ; tks' <- tcIfaceTcArgs (tyConKind tc') tks
+ ; tks' <- tcIfaceTcArgs tks
; return (mkTyConApp tc' tks') }
tcIfaceType (IfaceForAllTy tv t) = bindIfaceTyVar tv $ \ tv' -> do { t' <- tcIfaceType t; return (ForAllTy tv' t') }
-tcIfaceTypes :: [IfaceType] -> IfL [Type]
-tcIfaceTypes tys = mapM tcIfaceType tys
-
-tcIfaceTcArgs :: Kind -> [IfaceType] -> IfL [Type]
-tcIfaceTcArgs _ []
- = return []
-tcIfaceTcArgs kind (tk:tks)
- = case splitForAllTy_maybe kind of
- Nothing -> tcIfaceTypes (tk:tks)
- Just (_, kind') -> do { k' <- tcIfaceKind tk
- ; tks' <- tcIfaceTcArgs kind' tks
- ; return (k':tks') }
-
+tcIfaceTypeFun :: IfaceType -> IfaceType -> IfL Type
+tcIfaceTypeFun t1 t2 = do { t1' <- tcIfaceType t1; t2' <- tcIfaceType t2; return (FunTy t1' t2') }
+
+tcIfaceKind :: IfaceKind -> IfL Type
+tcIfaceKind (IfaceAppTy t1 t2) = do { t1' <- tcIfaceKind t1; t2' <- tcIfaceKind t2; return (AppTy t1' t2') }
+tcIfaceKind (IfaceFunTy t1 t2) = tcIfaceKindFun t1 t2
+tcIfaceKind (IfaceDFunTy t1 t2) = tcIfaceKindFun t1 t2
+tcIfaceKind (IfaceLitTy l) = pprPanic "tcIfaceKind" (ppr l)
+tcIfaceKind k = tcIfaceType k
+
+tcIfaceKindFun :: IfaceKind -> IfaceKind -> IfL Type
+tcIfaceKindFun t1 t2 = do { t1' <- tcIfaceKind t1; t2' <- tcIfaceKind t2; return (FunTy t1' t2') }
+
+tcIfaceTcArgs :: IfaceTcArgs -> IfL [Type]
+tcIfaceTcArgs args
+ = case args of
+ ITC_Type t ts ->
+ do { t' <- tcIfaceType t
+ ; ts' <- tcIfaceTcArgs ts
+ ; return (t':ts') }
+ ITC_Kind k ks ->
+ do { k' <- tcIfaceKind k
+ ; ks' <- tcIfaceTcArgs ks
+ ; return (k':ks') }
+ ITC_Nil -> return []
-----------------------------------------
tcIfaceCtxt :: IfaceContext -> IfL ThetaType
tcIfaceCtxt sts = mapM tcIfaceType sts
@@ -985,43 +1004,8 @@ tcIfaceCtxt sts = mapM tcIfaceType sts
tcIfaceTyLit :: IfaceTyLit -> IfL TyLit
tcIfaceTyLit (IfaceNumTyLit n) = return (NumTyLit n)
tcIfaceTyLit (IfaceStrTyLit n) = return (StrTyLit n)
-
------------------------------------------
-tcIfaceKind :: IfaceKind -> IfL Kind -- See Note [Checking IfaceTypes vs IfaceKinds]
-tcIfaceKind (IfaceTyVar n) = do { tv <- tcIfaceTyVar n; return (TyVarTy tv) }
-tcIfaceKind (IfaceAppTy t1 t2) = do { t1' <- tcIfaceKind t1; t2' <- tcIfaceKind t2; return (AppTy t1' t2') }
-tcIfaceKind (IfaceFunTy t1 t2) = do { t1' <- tcIfaceKind t1; t2' <- tcIfaceKind t2; return (FunTy t1' t2') }
-tcIfaceKind (IfaceTyConApp tc ts) = do { tc' <- tcIfaceKindCon tc; ts' <- tcIfaceKinds ts; return (mkTyConApp tc' ts') }
-tcIfaceKind (IfaceForAllTy tv t) = bindIfaceTyVar tv $ \ tv' -> do { t' <- tcIfaceKind t; return (ForAllTy tv' t') }
-tcIfaceKind t = pprPanic "tcIfaceKind" (ppr t) -- IfaceCoApp, IfaceLitTy
-
-tcIfaceKinds :: [IfaceKind] -> IfL [Kind]
-tcIfaceKinds tys = mapM tcIfaceKind tys
\end{code}
-Note [Checking IfaceTypes vs IfaceKinds]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-We need to know whether we are checking a *type* or a *kind*.
-Consider module M where
- Proxy :: forall k. k -> *
- data T = T
-and consider the two IfaceTypes
- M.Proxy * M.T{tc}
- M.Proxy 'M.T{tc} 'M.T(d}
-The first is conventional, but in the latter we use the promoted
-type constructor (as a kind) and data constructor (as a type). However,
-the Name of the promoted type constructor is just M.T; it's the *same name*
-as the ordinary type constructor.
-
-We could add a "promoted" flag to an IfaceTyCon, but that's a bit heavy.
-Instead we use context to distinguish, as in the source language.
- - When checking a kind, we look up M.T{tc} and promote it
- - When checking a type, we look up M.T{tc} and don't promote it
- and M.T{d} and promote it
- See tcIfaceKindCon and tcIfaceKTyCon respectively
-
-This context business is why we need tcIfaceTcArgs, and tcIfaceApps
-
%************************************************************************
%* *
@@ -1170,8 +1154,14 @@ tcIfaceExpr (IfaceLet (IfaceRec pairs) body)
tcIfaceExpr (IfaceTick tickish expr) = do
expr' <- tcIfaceExpr expr
- tickish' <- tcIfaceTickish tickish
- return (Tick tickish' expr')
+ -- If debug flag is not set: Ignore source notes
+ dbgFlag <- fmap (gopt Opt_Debug) getDynFlags
+ case tickish of
+ IfaceSource{} | not dbgFlag
+ -> return expr'
+ _otherwise -> do
+ tickish' <- tcIfaceTickish tickish
+ return (Tick tickish' expr')
-------------------------
tcIfaceApps :: IfaceExpr -> IfaceExpr -> IfL CoreExpr
@@ -1187,7 +1177,7 @@ tcIfaceApps fun arg
go_up fun _ [] = return fun
go_up fun fun_ty (IfaceType t : args)
| Just (tv,body_ty) <- splitForAllTy_maybe fun_ty
- = do { t' <- if isKindVar tv -- See Note [Checking IfaceTypes vs IfaceKinds]
+ = do { t' <- if isKindVar tv
then tcIfaceKind t
else tcIfaceType t
; let fun_ty' = substTyWith [tv] [t'] body_ty
@@ -1202,6 +1192,7 @@ tcIfaceApps fun arg
tcIfaceTickish :: IfaceTickish -> IfM lcl (Tickish Id)
tcIfaceTickish (IfaceHpcTick modl ix) = return (HpcTick modl ix)
tcIfaceTickish (IfaceSCC cc tick push) = return (ProfNote cc tick push)
+tcIfaceTickish (IfaceSource src name) = return (SourceNote src name)
-------------------------
tcIfaceLit :: Literal -> IfL Literal
@@ -1252,30 +1243,6 @@ tcIfaceDataAlt con inst_tys arg_strs rhs
\end{code}
-\begin{code}
-tcExtCoreBindings :: [IfaceBinding] -> IfL CoreProgram -- Used for external core
-tcExtCoreBindings [] = return []
-tcExtCoreBindings (b:bs) = do_one b (tcExtCoreBindings bs)
-
-do_one :: IfaceBinding -> IfL [CoreBind] -> IfL [CoreBind]
-do_one (IfaceNonRec bndr rhs) thing_inside
- = do { rhs' <- tcIfaceExpr rhs
- ; bndr' <- newExtCoreBndr bndr
- ; extendIfaceIdEnv [bndr'] $ do
- { core_binds <- thing_inside
- ; return (NonRec bndr' rhs' : core_binds) }}
-
-do_one (IfaceRec pairs) thing_inside
- = do { bndrs' <- mapM newExtCoreBndr bndrs
- ; extendIfaceIdEnv bndrs' $ do
- { rhss' <- mapM tcIfaceExpr rhss
- ; core_binds <- thing_inside
- ; return (Rec (bndrs' `zip` rhss') : core_binds) }}
- where
- (bndrs,rhss) = unzip pairs
-\end{code}
-
-
%************************************************************************
%* *
IdInfo
@@ -1458,26 +1425,19 @@ tcIfaceGlobal name
-- emasculated form (e.g. lacking data constructors).
tcIfaceTyCon :: IfaceTyCon -> IfL TyCon
-tcIfaceTyCon (IfaceTc name)
- = do { thing <- tcIfaceGlobal name
- ; case thing of -- A "type constructor" can be a promoted data constructor
- -- c.f. Trac #5881
- ATyCon tc -> return tc
- AConLike (RealDataCon dc) -> return (promoteDataCon dc)
- _ -> pprPanic "tcIfaceTyCon" (ppr name $$ ppr thing) }
-
-tcIfaceKindCon :: IfaceTyCon -> IfL TyCon
-tcIfaceKindCon (IfaceTc name)
- = do { thing <- tcIfaceGlobal name
- ; case thing of -- A "type constructor" here is a promoted type constructor
- -- c.f. Trac #5881
- ATyCon tc
- | isSuperKind (tyConKind tc)
- -> return tc -- Mainly just '*' or 'AnyK'
- | Just prom_tc <- promotableTyCon_maybe tc
- -> return prom_tc
-
- _ -> pprPanic "tcIfaceKindCon" (ppr name $$ ppr thing) }
+tcIfaceTyCon itc
+ = do {
+ ; thing <- tcIfaceGlobal (ifaceTyConName itc)
+ ; case itc of
+ IfaceTc _ -> return $ tyThingTyCon thing
+ IfacePromotedDataCon _ -> return $ promoteDataCon $ tyThingDataCon thing
+ IfacePromotedTyCon name ->
+ let ktycon tc
+ | isSuperKind (tyConKind tc) = return tc
+ | Just prom_tc <- promotableTyCon_maybe tc = return prom_tc
+ | otherwise = pprPanic "tcIfaceTyCon" (ppr name $$ ppr thing)
+ in ktycon (tyThingTyCon thing)
+ }
tcIfaceCoAxiom :: Name -> IfL (CoAxiom Branched)
tcIfaceCoAxiom name = do { thing <- tcIfaceGlobal name
@@ -1519,14 +1479,6 @@ bindIfaceBndrs (b:bs) thing_inside
bindIfaceBndrs bs $ \ bs' ->
thing_inside (b':bs')
------------------------
-newExtCoreBndr :: IfaceLetBndr -> IfL Id
-newExtCoreBndr (IfLetBndr var ty _) -- Ignoring IdInfo for now
- = do { mod <- getIfModule
- ; name <- newGlobalBinder mod (mkVarOccFS var) noSrcSpan
- ; ty' <- tcIfaceType ty
- ; return (mkLocalId name ty') }
-
-----------------------
bindIfaceTyVar :: IfaceTvBndr -> (TyVar -> IfL a) -> IfL a
bindIfaceTyVar (occ,kind) thing_inside
@@ -1548,22 +1500,8 @@ bindIfaceTyVars bndrs thing_inside
where
(occs,kinds) = unzip bndrs
-bindIfaceIdVar :: IfaceIdBndr -> (Id -> IfL a) -> IfL a
-bindIfaceIdVar (occ, ty) thing_inside
- = do { name <- newIfaceName (mkVarOccFS occ)
- ; ty' <- tcIfaceType ty
- ; let id = mkLocalId name ty'
- ; extendIfaceIdEnv [id] (thing_inside id) }
-
-bindIfaceIdVars :: [IfaceIdBndr] -> ([Id] -> IfL a) -> IfL a
-bindIfaceIdVars [] thing_inside = thing_inside []
-bindIfaceIdVars (v:vs) thing_inside
- = bindIfaceIdVar v $ \ v' ->
- bindIfaceIdVars vs $ \ vs' ->
- thing_inside (v':vs')
-
isSuperIfaceKind :: IfaceKind -> Bool
-isSuperIfaceKind (IfaceTyConApp (IfaceTc n) []) = n == superKindTyConName
+isSuperIfaceKind (IfaceTyConApp tc ITC_Nil) = ifaceTyConName tc == superKindTyConName
isSuperIfaceKind _ = False
mk_iface_tyvar :: Name -> IfaceKind -> IfL TyVar
diff --git a/compiler/llvmGen/Llvm/AbsSyn.hs b/compiler/llvmGen/Llvm/AbsSyn.hs
index f92bd89c5cd6..24d0856ea32e 100644
--- a/compiler/llvmGen/Llvm/AbsSyn.hs
+++ b/compiler/llvmGen/Llvm/AbsSyn.hs
@@ -65,6 +65,8 @@ data LlvmFunction = LlvmFunction {
type LlvmFunctions = [LlvmFunction]
+type SingleThreaded = Bool
+
-- | LLVM ordering types for synchronization purposes. (Introduced in LLVM
-- 3.0). Please see the LLVM documentation for a better description.
data LlvmSyncOrdering
@@ -223,6 +225,11 @@ data LlvmExpression
-}
| Load LlvmVar
+ {- |
+ Atomic load of the value at location ptr
+ -}
+ | ALoad LlvmSyncOrdering SingleThreaded LlvmVar
+
{- |
Navigate in an structure, selecting elements
* inbound: Is the pointer inbounds? (computed pointer doesn't overflow)
diff --git a/compiler/llvmGen/Llvm/PpLlvm.hs b/compiler/llvmGen/Llvm/PpLlvm.hs
index b8343ceff361..73077257f89a 100644
--- a/compiler/llvmGen/Llvm/PpLlvm.hs
+++ b/compiler/llvmGen/Llvm/PpLlvm.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
--------------------------------------------------------------------------------
-- | Pretty print LLVM IR Code.
--
@@ -237,6 +239,7 @@ ppLlvmExpression expr
Insert vec elt idx -> ppInsert vec elt idx
GetElemPtr inb ptr indexes -> ppGetElementPtr inb ptr indexes
Load ptr -> ppLoad ptr
+ ALoad ord st ptr -> ppALoad ord st ptr
Malloc tp amount -> ppMalloc tp amount
Phi tp precessors -> ppPhi tp precessors
Asm asm c ty v se sk -> ppAsm asm c ty v se sk
@@ -325,13 +328,18 @@ ppSyncOrdering SyncSeqCst = text "seq_cst"
-- of specifying alignment.
ppLoad :: LlvmVar -> SDoc
-ppLoad var
- | isVecPtrVar var = text "load" <+> ppr var <>
- comma <+> text "align 1"
- | otherwise = text "load" <+> ppr var
+ppLoad var = text "load" <+> ppr var <> align
where
- isVecPtrVar :: LlvmVar -> Bool
- isVecPtrVar = isVector . pLower . getVarType
+ align | isVector . pLower . getVarType $ var = text ", align 1"
+ | otherwise = empty
+
+ppALoad :: LlvmSyncOrdering -> SingleThreaded -> LlvmVar -> SDoc
+ppALoad ord st var = sdocWithDynFlags $ \dflags ->
+ let alignment = (llvmWidthInBits dflags $ getVarType var) `quot` 8
+ align = text ", align" <+> ppr alignment
+ sThreaded | st = text " singlethread"
+ | otherwise = empty
+ in text "load atomic" <+> ppr var <> sThreaded <+> ppSyncOrdering ord <> align
ppStore :: LlvmVar -> LlvmVar -> SDoc
ppStore val dst
diff --git a/compiler/llvmGen/Llvm/Types.hs b/compiler/llvmGen/Llvm/Types.hs
index 6b9c8c181a26..89b0e4e14179 100644
--- a/compiler/llvmGen/Llvm/Types.hs
+++ b/compiler/llvmGen/Llvm/Types.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE GeneralizedNewtypeDeriving #-}
+{-# LANGUAGE CPP, GeneralizedNewtypeDeriving #-}
--------------------------------------------------------------------------------
-- | The LLVM Type System.
diff --git a/compiler/llvmGen/LlvmCodeGen.hs b/compiler/llvmGen/LlvmCodeGen.hs
index d0f343fa925b..dd16e5286854 100644
--- a/compiler/llvmGen/LlvmCodeGen.hs
+++ b/compiler/llvmGen/LlvmCodeGen.hs
@@ -1,7 +1,8 @@
+{-# LANGUAGE CPP, TypeFamilies #-}
+
-- -----------------------------------------------------------------------------
-- | This is the top-level module in the LLVM code generator.
--
-
module LlvmCodeGen ( llvmCodeGen, llvmFixupAsm ) where
#include "HsVersions.h"
diff --git a/compiler/llvmGen/LlvmCodeGen/Base.hs b/compiler/llvmGen/LlvmCodeGen/Base.hs
index 5d5f385ade29..50cd824b2481 100644
--- a/compiler/llvmGen/LlvmCodeGen/Base.hs
+++ b/compiler/llvmGen/LlvmCodeGen/Base.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-- ----------------------------------------------------------------------------
-- | Base LLVM Code Generation module
--
@@ -404,7 +406,7 @@ strDisplayName_llvm lbl = do
dflags <- getDynFlags
let sdoc = pprCLabel platform lbl
depth = Outp.PartWay 1
- style = Outp.mkUserStyle (\ _ _ -> Outp.NameNotInScope2, Outp.alwaysQualifyModules) depth
+ style = Outp.mkUserStyle Outp.reallyAlwaysQualify depth
str = Outp.renderWithStyle dflags sdoc style
return (fsLit (dropInfoSuffix str))
diff --git a/compiler/llvmGen/LlvmCodeGen/CodeGen.hs b/compiler/llvmGen/LlvmCodeGen/CodeGen.hs
index 808c591d92d7..98df17440479 100644
--- a/compiler/llvmGen/LlvmCodeGen/CodeGen.hs
+++ b/compiler/llvmGen/LlvmCodeGen/CodeGen.hs
@@ -1,9 +1,8 @@
-{-# OPTIONS -fno-warn-type-defaults #-}
+{-# LANGUAGE CPP, GADTs #-}
+{-# OPTIONS_GHC -fno-warn-type-defaults #-}
-- ----------------------------------------------------------------------------
-- | Handle conversion of CmmProc to LLVM code.
--
-
-{-# LANGUAGE GADTs #-}
module LlvmCodeGen.CodeGen ( genLlvmProc ) where
#include "HsVersions.h"
@@ -16,6 +15,7 @@ import BlockId
import CodeGen.Platform ( activeStgRegs, callerSaves )
import CLabel
import Cmm
+import CPrim
import PprCmm
import CmmUtils
import Hoopl
@@ -33,6 +33,7 @@ import Unique
import Data.List ( nub )
import Data.Maybe ( catMaybes )
+type Atomic = Bool
type LlvmStatements = OrdList LlvmStatement
-- -----------------------------------------------------------------------------
@@ -73,7 +74,8 @@ basicBlocksCodeGen live (entryBlock:cmmBlocks)
-- | Generate code for one block
basicBlockCodeGen :: CmmBlock -> LlvmM ( LlvmBasicBlock, [LlvmCmmDecl] )
basicBlockCodeGen block
- = do let (CmmEntry id, nodes, tail) = blockSplit block
+ = do let (_, nodes, tail) = blockSplit block
+ id = entryLabel block
(mid_instrs, top) <- stmtsToInstrs $ blockToList nodes
(tail_instrs, top') <- stmtToInstrs tail
let instrs = fromOL (mid_instrs `appOL` tail_instrs)
@@ -101,6 +103,8 @@ stmtToInstrs :: CmmNode e x -> LlvmM StmtData
stmtToInstrs stmt = case stmt of
CmmComment _ -> return (nilOL, []) -- nuke comments
+ CmmTick _ -> return (nilOL, [])
+ CmmUnwind {} -> return (nilOL, [])
CmmAssign reg src -> genAssign reg src
CmmStore addr src -> genStore addr src
@@ -223,12 +227,28 @@ genCall t@(PrimTarget (MO_Prefetch_Data localityInt)) [] args
return (stmts, top1 ++ top2)
| otherwise = panic $ "prefetch locality level integer must be between 0 and 3, given: " ++ (show localityInt)
--- Handle PopCnt and BSwap that need to only convert arg and return types
+-- Handle PopCnt, Clz, Ctz, and BSwap that need to only convert arg
+-- and return types
genCall t@(PrimTarget (MO_PopCnt w)) dsts args =
genCallSimpleCast w t dsts args
+genCall t@(PrimTarget (MO_Clz w)) dsts args =
+ genCallSimpleCast w t dsts args
+genCall t@(PrimTarget (MO_Ctz w)) dsts args =
+ genCallSimpleCast w t dsts args
genCall t@(PrimTarget (MO_BSwap w)) dsts args =
genCallSimpleCast w t dsts args
+genCall (PrimTarget (MO_AtomicRead _)) [dst] [addr] = do
+ dstV <- getCmmReg (CmmLocal dst)
+ (v1, stmts, top) <- genLoad True addr (localRegType dst)
+ let stmt1 = Store v1 dstV
+ return (stmts `snocOL` stmt1, top)
+
+-- TODO: implement these properly rather than calling RTS functions.
+-- genCall t@(PrimTarget (MO_AtomicWrite width)) [] [addr, val] = undefined
+-- genCall t@(PrimTarget (MO_AtomicRMW width amop)) [dst] [addr, n] = undefined
+-- genCall t@(PrimTarget (MO_Cmpxchg width)) [dst] [addr, old, new] = undefined
+
-- Handle memcpy function specifically since llvm's intrinsic version takes
-- some extra parameters.
genCall t@(PrimTarget op) [] args'
@@ -546,10 +566,11 @@ cmmPrimOpFunctions mop = do
(MO_PopCnt w) -> fsLit $ "llvm.ctpop." ++ showSDoc dflags (ppr $ widthToLlvmInt w)
(MO_BSwap w) -> fsLit $ "llvm.bswap." ++ showSDoc dflags (ppr $ widthToLlvmInt w)
+ (MO_Clz w) -> fsLit $ "llvm.ctlz." ++ showSDoc dflags (ppr $ widthToLlvmInt w)
+ (MO_Ctz w) -> fsLit $ "llvm.cttz." ++ showSDoc dflags (ppr $ widthToLlvmInt w)
(MO_Prefetch_Data _ )-> fsLit "llvm.prefetch"
-
MO_S_QuotRem {} -> unsupported
MO_U_QuotRem {} -> unsupported
MO_U_QuotRem2 {} -> unsupported
@@ -559,6 +580,12 @@ cmmPrimOpFunctions mop = do
MO_Touch -> unsupported
MO_UF_Conv _ -> unsupported
+ MO_AtomicRead _ -> unsupported
+
+ MO_AtomicRMW w amop -> fsLit $ atomicRMWLabel w amop
+ MO_Cmpxchg w -> fsLit $ cmpxchgLabel w
+ MO_AtomicWrite w -> fsLit $ atomicWriteLabel w
+
-- | Tail function calls
genJump :: CmmExpr -> [GlobalReg] -> LlvmM StmtData
@@ -805,7 +832,7 @@ genSwitch cond maybe_ids = do
let pairs = [ (ix, id) | (ix,Just id) <- zip [0..] maybe_ids ]
let labels = map (\(ix, b) -> (mkIntLit ty ix, blockIdToLlvm b)) pairs
- -- out of range is undefied, so lets just branch to first label
+ -- out of range is undefined, so let's just branch to first label
let (_, defLbl) = head labels
let s1 = Switch vc defLbl labels
@@ -850,7 +877,7 @@ exprToVarOpt opt e = case e of
-> genLit opt lit
CmmLoad e' ty
- -> genLoad e' ty
+ -> genLoad False e' ty
-- Cmmreg in expression is the value, so must load. If you want actual
-- reg pointer, call getCmmReg directly.
@@ -1002,8 +1029,8 @@ genMachOp _ op [x] = case op of
sameConv from ty reduce expand = do
x'@(vx, stmts, top) <- exprToVar x
let sameConv' op = do
- (v1, s1) <- doExpr ty $ Cast op vx ty
- return (v1, stmts `snocOL` s1, top)
+ (v1, s1) <- doExpr ty $ Cast op vx ty
+ return (v1, stmts `snocOL` s1, top)
dflags <- getDynFlags
let toWidth = llvmWidthInBits dflags ty
-- LLVM doesn't like trying to convert to same width, so
@@ -1269,41 +1296,41 @@ genMachOp_slow _ _ _ = panic "genMachOp: More then 2 expressions in MachOp!"
-- | Handle CmmLoad expression.
-genLoad :: CmmExpr -> CmmType -> LlvmM ExprData
+genLoad :: Atomic -> CmmExpr -> CmmType -> LlvmM ExprData
-- First we try to detect a few common cases and produce better code for
-- these then the default case. We are mostly trying to detect Cmm code
-- like I32[Sp + n] and use 'getelementptr' operations instead of the
-- generic case that uses casts and pointer arithmetic
-genLoad e@(CmmReg (CmmGlobal r)) ty
- = genLoad_fast e r 0 ty
+genLoad atomic e@(CmmReg (CmmGlobal r)) ty
+ = genLoad_fast atomic e r 0 ty
-genLoad e@(CmmRegOff (CmmGlobal r) n) ty
- = genLoad_fast e r n ty
+genLoad atomic e@(CmmRegOff (CmmGlobal r) n) ty
+ = genLoad_fast atomic e r n ty
-genLoad e@(CmmMachOp (MO_Add _) [
+genLoad atomic e@(CmmMachOp (MO_Add _) [
(CmmReg (CmmGlobal r)),
(CmmLit (CmmInt n _))])
ty
- = genLoad_fast e r (fromInteger n) ty
+ = genLoad_fast atomic e r (fromInteger n) ty
-genLoad e@(CmmMachOp (MO_Sub _) [
+genLoad atomic e@(CmmMachOp (MO_Sub _) [
(CmmReg (CmmGlobal r)),
(CmmLit (CmmInt n _))])
ty
- = genLoad_fast e r (negate $ fromInteger n) ty
+ = genLoad_fast atomic e r (negate $ fromInteger n) ty
-- generic case
-genLoad e ty
+genLoad atomic e ty
= do other <- getTBAAMeta otherN
- genLoad_slow e ty other
+ genLoad_slow atomic e ty other
-- | Handle CmmLoad expression.
-- This is a special case for loading from a global register pointer
-- offset such as I32[Sp+8].
-genLoad_fast :: CmmExpr -> GlobalReg -> Int -> CmmType
- -> LlvmM ExprData
-genLoad_fast e r n ty = do
+genLoad_fast :: Atomic -> CmmExpr -> GlobalReg -> Int -> CmmType
+ -> LlvmM ExprData
+genLoad_fast atomic e r n ty = do
dflags <- getDynFlags
(gv, grt, s1) <- getCmmRegVal (CmmGlobal r)
meta <- getTBAARegMeta r
@@ -1316,7 +1343,7 @@ genLoad_fast e r n ty = do
case grt == ty' of
-- were fine
True -> do
- (var, s3) <- doExpr ty' (MExpr meta $ Load ptr)
+ (var, s3) <- doExpr ty' (MExpr meta $ loadInstr ptr)
return (var, s1 `snocOL` s2 `snocOL` s3,
[])
@@ -1324,32 +1351,34 @@ genLoad_fast e r n ty = do
False -> do
let pty = pLift ty'
(ptr', s3) <- doExpr pty $ Cast LM_Bitcast ptr pty
- (var, s4) <- doExpr ty' (MExpr meta $ Load ptr')
+ (var, s4) <- doExpr ty' (MExpr meta $ loadInstr ptr')
return (var, s1 `snocOL` s2 `snocOL` s3
`snocOL` s4, [])
-- If its a bit type then we use the slow method since
-- we can't avoid casting anyway.
- False -> genLoad_slow e ty meta
-
+ False -> genLoad_slow atomic e ty meta
+ where
+ loadInstr ptr | atomic = ALoad SyncSeqCst False ptr
+ | otherwise = Load ptr
-- | Handle Cmm load expression.
-- Generic case. Uses casts and pointer arithmetic if needed.
-genLoad_slow :: CmmExpr -> CmmType -> [MetaAnnot] -> LlvmM ExprData
-genLoad_slow e ty meta = do
+genLoad_slow :: Atomic -> CmmExpr -> CmmType -> [MetaAnnot] -> LlvmM ExprData
+genLoad_slow atomic e ty meta = do
(iptr, stmts, tops) <- exprToVar e
dflags <- getDynFlags
case getVarType iptr of
LMPointer _ -> do
(dvar, load) <- doExpr (cmmToLlvmType ty)
- (MExpr meta $ Load iptr)
+ (MExpr meta $ loadInstr iptr)
return (dvar, stmts `snocOL` load, tops)
i@(LMInt _) | i == llvmWord dflags -> do
let pty = LMPointer $ cmmToLlvmType ty
(ptr, cast) <- doExpr pty $ Cast LM_Inttoptr iptr pty
(dvar, load) <- doExpr (cmmToLlvmType ty)
- (MExpr meta $ Load ptr)
+ (MExpr meta $ loadInstr ptr)
return (dvar, stmts `snocOL` cast `snocOL` load, tops)
other -> do dflags <- getDynFlags
@@ -1358,6 +1387,9 @@ genLoad_slow e ty meta = do
"Size of Ptr: " ++ show (llvmPtrBits dflags) ++
", Size of var: " ++ show (llvmWidthInBits dflags other) ++
", Var: " ++ showSDoc dflags (ppr iptr)))
+ where
+ loadInstr ptr | atomic = ALoad SyncSeqCst False ptr
+ | otherwise = Load ptr
-- | Handle CmmReg expression. This will return a pointer to the stack
diff --git a/compiler/llvmGen/LlvmCodeGen/Data.hs b/compiler/llvmGen/LlvmCodeGen/Data.hs
index 6212cfc9fbf6..1dbfb4b52769 100644
--- a/compiler/llvmGen/LlvmCodeGen/Data.hs
+++ b/compiler/llvmGen/LlvmCodeGen/Data.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
-- ----------------------------------------------------------------------------
-- | Handle conversion of CmmData to LLVM code.
--
diff --git a/compiler/llvmGen/LlvmCodeGen/Ppr.hs b/compiler/llvmGen/LlvmCodeGen/Ppr.hs
index 202e685c0e0f..9c6a719613cf 100644
--- a/compiler/llvmGen/LlvmCodeGen/Ppr.hs
+++ b/compiler/llvmGen/LlvmCodeGen/Ppr.hs
@@ -1,7 +1,8 @@
+{-# LANGUAGE CPP #-}
+
-- ----------------------------------------------------------------------------
-- | Pretty print helpers for the LLVM Code generator.
--
-
module LlvmCodeGen.Ppr (
pprLlvmHeader, pprLlvmCmmDecl, pprLlvmData, infoSection, iTableSuf
) where
diff --git a/compiler/llvmGen/LlvmCodeGen/Regs.hs b/compiler/llvmGen/LlvmCodeGen/Regs.hs
index 9f20aa5de567..004865906978 100644
--- a/compiler/llvmGen/LlvmCodeGen/Regs.hs
+++ b/compiler/llvmGen/LlvmCodeGen/Regs.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
--------------------------------------------------------------------------------
-- | Deal with Cmm registers
--
diff --git a/compiler/llvmGen/LlvmMangler.hs b/compiler/llvmGen/LlvmMangler.hs
index a9054174e145..8652a890cf83 100644
--- a/compiler/llvmGen/LlvmMangler.hs
+++ b/compiler/llvmGen/LlvmMangler.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-- -----------------------------------------------------------------------------
-- | GHC LLVM Mangler
--
@@ -56,13 +58,23 @@ llvmFixupAsm dflags f1 f2 = {-# SCC "llvm_mangler" #-} do
hClose w
return ()
+-- | This rewrites @.type@ annotations of function symbols to @%object@.
+-- This is done as the linker can relocate @%functions@ through the
+-- Procedure Linking Table (PLT). This is bad since we expect that the
+-- info table will appear directly before the symbol's location. In the
+-- case that the PLT is used, this will be not an info table but instead
+-- some random PLT garbage.
rewriteSymType :: B.ByteString -> B.ByteString
rewriteSymType s =
- foldl (\s' (typeFunc,typeObj)->replace typeFunc typeObj s') s types
+ B.unlines $ map (rewrite '@' . rewrite '%') $ B.lines s
where
- types = [ (B.pack "@function", B.pack "@object")
- , (B.pack "%function", B.pack "%object")
- ]
+ rewrite :: Char -> B.ByteString -> B.ByteString
+ rewrite prefix x
+ | isType x = replace funcType objType x
+ | otherwise = x
+ where
+ funcType = prefix `B.cons` B.pack "function"
+ objType = prefix `B.cons` B.pack "object"
-- | Splits the file contents into its sections
readSections :: Handle -> Handle -> IO [Section]
diff --git a/compiler/main/BreakArray.hs b/compiler/main/BreakArray.hs
index d16d6f229dc3..6455912b671f 100644
--- a/compiler/main/BreakArray.hs
+++ b/compiler/main/BreakArray.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-}
+
-------------------------------------------------------------------------------
--
-- | Break Arrays in the IO monad
diff --git a/compiler/main/CmdLineParser.hs b/compiler/main/CmdLineParser.hs
index 22811d44cc1b..5ee7086cbc77 100644
--- a/compiler/main/CmdLineParser.hs
+++ b/compiler/main/CmdLineParser.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-------------------------------------------------------------------------------
--
-- | Command-line parser
diff --git a/compiler/main/CodeOutput.lhs b/compiler/main/CodeOutput.lhs
index b8b187241b71..043163790afb 100644
--- a/compiler/main/CodeOutput.lhs
+++ b/compiler/main/CodeOutput.lhs
@@ -4,6 +4,8 @@
\section{Code output phase}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module CodeOutput( codeOutput, outputForeignStubs ) where
#include "HsVersions.h"
@@ -48,7 +50,7 @@ codeOutput :: DynFlags
-> FilePath
-> ModLocation
-> ForeignStubs
- -> [PackageId]
+ -> [PackageKey]
-> Stream IO RawCmmGroup () -- Compiled C--
-> IO (FilePath,
(Bool{-stub_h_exists-}, Maybe FilePath{-stub_c_exists-}))
@@ -72,10 +74,9 @@ codeOutput dflags this_mod filenm location foreign_stubs pkg_deps cmm_stream
; return cmm
}
- ; showPass dflags "CodeOutput"
; stubs_exist <- outputForeignStubs dflags this_mod location foreign_stubs
; case hscTarget dflags of {
- HscAsm -> outputAsm dflags this_mod filenm linted_cmm_stream;
+ HscAsm -> outputAsm dflags this_mod location filenm linted_cmm_stream;
HscC -> outputC dflags filenm linted_cmm_stream pkg_deps;
HscLlvm -> outputLlvm dflags filenm linted_cmm_stream;
HscInterpreted -> panic "codeOutput: HscInterpreted";
@@ -99,7 +100,7 @@ doOutput filenm io_action = bracket (openFile filenm WriteMode) hClose io_action
outputC :: DynFlags
-> FilePath
-> Stream IO RawCmmGroup ()
- -> [PackageId]
+ -> [PackageKey]
-> IO ()
outputC dflags filenm cmm_stream packages
@@ -114,7 +115,7 @@ outputC dflags filenm cmm_stream packages
-- * -#include options from the cmdline and OPTIONS pragmas
-- * the _stub.h file, if there is one.
--
- let rts = getPackageDetails (pkgState dflags) rtsPackageId
+ let rts = getPackageDetails dflags rtsPackageKey
let cc_injects = unlines (map mk_include (includes rts))
mk_include h_file =
@@ -140,8 +141,8 @@ outputC dflags filenm cmm_stream packages
%************************************************************************
\begin{code}
-outputAsm :: DynFlags -> Module -> FilePath -> Stream IO RawCmmGroup () -> IO ()
-outputAsm dflags this_mod filenm cmm_stream
+outputAsm :: DynFlags -> Module -> ModLocation -> FilePath -> Stream IO RawCmmGroup () -> IO ()
+outputAsm dflags this_mod location filenm cmm_stream
| cGhcWithNativeCodeGen == "YES"
= do ncg_uniqs <- mkSplitUniqSupply 'n'
@@ -149,7 +150,7 @@ outputAsm dflags this_mod filenm cmm_stream
_ <- {-# SCC "OutputAsm" #-} doOutput filenm $
\h -> {-# SCC "NativeCodeGen" #-}
- nativeCodeGen dflags this_mod h ncg_uniqs cmm_stream
+ nativeCodeGen dflags this_mod location h ncg_uniqs cmm_stream
return ()
| otherwise
@@ -190,11 +191,8 @@ outputForeignStubs dflags mod location stubs
stub_c <- newTempName dflags "c"
case stubs of
- NoStubs -> do
- -- When compiling External Core files, may need to use stub
- -- files from a previous compilation
- stub_h_exists <- doesFileExist stub_h
- return (stub_h_exists, Nothing)
+ NoStubs ->
+ return (False, Nothing)
ForeignStubs h_code c_code -> do
let
@@ -212,7 +210,7 @@ outputForeignStubs dflags mod location stubs
-- we need the #includes from the rts package for the stub files
let rts_includes =
- let rts_pkg = getPackageDetails (pkgState dflags) rtsPackageId in
+ let rts_pkg = getPackageDetails dflags rtsPackageKey in
concatMap mk_include (includes rts_pkg)
mk_include i = "#include \"" ++ i ++ "\"\n"
diff --git a/compiler/main/DriverMkDepend.hs b/compiler/main/DriverMkDepend.hs
index cda0b4729f6e..03545d482864 100644
--- a/compiler/main/DriverMkDepend.hs
+++ b/compiler/main/DriverMkDepend.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Makefile Dependency Generation
diff --git a/compiler/main/DriverPhases.hs b/compiler/main/DriverPhases.hs
index 2de19b9795cc..fa8b2d060f14 100644
--- a/compiler/main/DriverPhases.hs
+++ b/compiler/main/DriverPhases.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
-- $Id: DriverPhases.hs,v 1.38 2005/05/17 11:01:59 simonmar Exp $
--
@@ -18,7 +20,6 @@ module DriverPhases (
isHaskellSrcSuffix,
isObjectSuffix,
isCishSuffix,
- isExtCoreSuffix,
isDynLibSuffix,
isHaskellUserSrcSuffix,
isSourceSuffix,
@@ -27,7 +28,6 @@ module DriverPhases (
isHaskellSrcFilename,
isObjectFilename,
isCishFilename,
- isExtCoreFilename,
isDynLibFilename,
isHaskellUserSrcFilename,
isSourceFilename
@@ -56,7 +56,7 @@ import System.FilePath
-}
data HscSource
- = HsSrcFile | HsBootFile | ExtCoreFile
+ = HsSrcFile | HsBootFile
deriving( Eq, Ord, Show )
-- Ord needed for the finite maps we build in CompManager
@@ -64,7 +64,6 @@ data HscSource
hscSourceString :: HscSource -> String
hscSourceString HsSrcFile = ""
hscSourceString HsBootFile = "[boot]"
-hscSourceString ExtCoreFile = "[ext core]"
isHsBoot :: HscSource -> Bool
isHsBoot HsBootFile = True
@@ -82,7 +81,7 @@ data Phase
| HCc -- Haskellised C (as opposed to vanilla C) compilation
| Splitter -- Assembly file splitter (part of '-split-objs')
| SplitAs -- Assembler for split assembly files (part of '-split-objs')
- | As -- Assembler for regular assembly files
+ | As Bool -- Assembler for regular assembly files (Bool: with-cpp)
| LlvmOpt -- Run LLVM opt tool over llvm assembly
| LlvmLlc -- LLVM bitcode to native assembly
| LlvmMangle -- Fix up TNTC by processing assembly produced by LLVM
@@ -119,7 +118,7 @@ eqPhase Cobjcpp Cobjcpp = True
eqPhase HCc HCc = True
eqPhase Splitter Splitter = True
eqPhase SplitAs SplitAs = True
-eqPhase As As = True
+eqPhase (As x) (As y) = x == y
eqPhase LlvmOpt LlvmOpt = True
eqPhase LlvmLlc LlvmLlc = True
eqPhase LlvmMangle LlvmMangle = True
@@ -150,21 +149,21 @@ nextPhase dflags p
Splitter -> SplitAs
LlvmOpt -> LlvmLlc
LlvmLlc -> LlvmMangle
- LlvmMangle -> As
+ LlvmMangle -> As False
SplitAs -> MergeStub
- As -> MergeStub
- Ccpp -> As
- Cc -> As
- Cobjc -> As
- Cobjcpp -> As
+ As _ -> MergeStub
+ Ccpp -> As False
+ Cc -> As False
+ Cobjc -> As False
+ Cobjcpp -> As False
CmmCpp -> Cmm
Cmm -> maybeHCc
- HCc -> As
+ HCc -> As False
MergeStub -> StopLn
StopLn -> panic "nextPhase: nothing after StopLn"
where maybeHCc = if platformUnregisterised (targetPlatform dflags)
then HCc
- else As
+ else As False
-- the first compilation phase for a given file is determined
-- by its suffix.
@@ -175,7 +174,6 @@ startPhase "hs" = Cpp HsSrcFile
startPhase "hs-boot" = Cpp HsBootFile
startPhase "hscpp" = HsPp HsSrcFile
startPhase "hspp" = Hsc HsSrcFile
-startPhase "hcr" = Hsc ExtCoreFile
startPhase "hc" = HCc
startPhase "c" = Cc
startPhase "cpp" = Ccpp
@@ -186,8 +184,8 @@ startPhase "mm" = Cobjcpp
startPhase "cc" = Ccpp
startPhase "cxx" = Ccpp
startPhase "split_s" = Splitter
-startPhase "s" = As
-startPhase "S" = As
+startPhase "s" = As False
+startPhase "S" = As True
startPhase "ll" = LlvmOpt
startPhase "bc" = LlvmLlc
startPhase "lm_s" = LlvmMangle
@@ -202,7 +200,6 @@ startPhase _ = StopLn -- all unknown file types
phaseInputExt :: Phase -> String
phaseInputExt (Unlit HsSrcFile) = "lhs"
phaseInputExt (Unlit HsBootFile) = "lhs-boot"
-phaseInputExt (Unlit ExtCoreFile) = "lhcr"
phaseInputExt (Cpp _) = "lpp" -- intermediate only
phaseInputExt (HsPp _) = "hscpp" -- intermediate only
phaseInputExt (Hsc _) = "hspp" -- intermediate only
@@ -215,7 +212,8 @@ phaseInputExt Cobjc = "m"
phaseInputExt Cobjcpp = "mm"
phaseInputExt Cc = "c"
phaseInputExt Splitter = "split_s"
-phaseInputExt As = "s"
+phaseInputExt (As True) = "S"
+phaseInputExt (As False) = "s"
phaseInputExt LlvmOpt = "ll"
phaseInputExt LlvmLlc = "bc"
phaseInputExt LlvmMangle = "lm_s"
@@ -226,13 +224,12 @@ phaseInputExt MergeStub = "o"
phaseInputExt StopLn = "o"
haskellish_src_suffixes, haskellish_suffixes, cish_suffixes,
- extcoreish_suffixes, haskellish_user_src_suffixes
+ haskellish_user_src_suffixes
:: [String]
haskellish_src_suffixes = haskellish_user_src_suffixes ++
[ "hspp", "hscpp", "hcr", "cmm", "cmmcpp" ]
haskellish_suffixes = haskellish_src_suffixes ++ ["hc", "raw_s"]
cish_suffixes = [ "c", "cpp", "C", "cc", "cxx", "s", "S", "ll", "bc", "lm_s", "m", "M", "mm" ]
-extcoreish_suffixes = [ "hcr" ]
-- Will not be deleted as temp files:
haskellish_user_src_suffixes = [ "hs", "lhs", "hs-boot", "lhs-boot" ]
@@ -240,22 +237,21 @@ objish_suffixes :: Platform -> [String]
-- Use the appropriate suffix for the system on which
-- the GHC-compiled code will run
objish_suffixes platform = case platformOS platform of
- OSMinGW32 -> [ "o", "O", "obj", "OBJ" ]
- _ -> [ "o" ]
+ OSMinGW32 -> [ "o", "O", "obj", "OBJ" ]
+ _ -> [ "o" ]
dynlib_suffixes :: Platform -> [String]
dynlib_suffixes platform = case platformOS platform of
- OSMinGW32 -> ["dll", "DLL"]
- OSDarwin -> ["dylib"]
- _ -> ["so"]
+ OSMinGW32 -> ["dll", "DLL"]
+ OSDarwin -> ["dylib", "so"]
+ _ -> ["so"]
-isHaskellishSuffix, isHaskellSrcSuffix, isCishSuffix, isExtCoreSuffix,
+isHaskellishSuffix, isHaskellSrcSuffix, isCishSuffix,
isHaskellUserSrcSuffix
:: String -> Bool
isHaskellishSuffix s = s `elem` haskellish_suffixes
isHaskellSrcSuffix s = s `elem` haskellish_src_suffixes
isCishSuffix s = s `elem` cish_suffixes
-isExtCoreSuffix s = s `elem` extcoreish_suffixes
isHaskellUserSrcSuffix s = s `elem` haskellish_user_src_suffixes
isObjectSuffix, isDynLibSuffix :: Platform -> String -> Bool
@@ -266,13 +262,12 @@ isSourceSuffix :: String -> Bool
isSourceSuffix suff = isHaskellishSuffix suff || isCishSuffix suff
isHaskellishFilename, isHaskellSrcFilename, isCishFilename,
- isExtCoreFilename, isHaskellUserSrcFilename, isSourceFilename
+ isHaskellUserSrcFilename, isSourceFilename
:: FilePath -> Bool
-- takeExtension return .foo, so we drop 1 to get rid of the .
isHaskellishFilename f = isHaskellishSuffix (drop 1 $ takeExtension f)
isHaskellSrcFilename f = isHaskellSrcSuffix (drop 1 $ takeExtension f)
isCishFilename f = isCishSuffix (drop 1 $ takeExtension f)
-isExtCoreFilename f = isExtCoreSuffix (drop 1 $ takeExtension f)
isHaskellUserSrcFilename f = isHaskellUserSrcSuffix (drop 1 $ takeExtension f)
isSourceFilename f = isSourceSuffix (drop 1 $ takeExtension f)
diff --git a/compiler/main/DriverPipeline.hs b/compiler/main/DriverPipeline.hs
index 3d34bdbcac67..183f43529636 100644
--- a/compiler/main/DriverPipeline.hs
+++ b/compiler/main/DriverPipeline.hs
@@ -1,5 +1,5 @@
-{-# OPTIONS -fno-cse #-}
-{-# LANGUAGE NamedFieldPuns #-}
+{-# LANGUAGE CPP, NamedFieldPuns, NondecreasingIndentation #-}
+{-# OPTIONS_GHC -fno-cse #-}
-- -fno-cse is needed for GLOBAL_VAR's to behave properly
-----------------------------------------------------------------------------
@@ -54,7 +54,6 @@ import Util
import StringBuffer ( hGetStringBuffer )
import BasicTypes ( SuccessFlag(..) )
import Maybes ( expectJust )
-import ParserCoreUtils ( getCoreModuleName )
import SrcLoc
import FastString
import LlvmCodeGen ( llvmFixupAsm )
@@ -139,11 +138,13 @@ compileOne' m_tc_result mHscMessage
input_fnpp = ms_hspp_file summary
mod_graph = hsc_mod_graph hsc_env0
needsTH = any (xopt Opt_TemplateHaskell . ms_hspp_opts) mod_graph
+ needsQQ = any (xopt Opt_QuasiQuotes . ms_hspp_opts) mod_graph
+ needsLinker = needsTH || needsQQ
isDynWay = any (== WayDyn) (ways dflags0)
isProfWay = any (== WayProf) (ways dflags0)
-- #8180 - when using TemplateHaskell, switch on -dynamic-too so
-- the linker can correctly load the object files.
- let dflags1 = if needsTH && dynamicGhc && not isDynWay && not isProfWay
+ let dflags1 = if needsLinker && dynamicGhc && not isDynWay && not isProfWay
then gopt_set dflags0 Opt_BuildDynamicToo
else dflags0
@@ -167,8 +168,6 @@ compileOne' m_tc_result mHscMessage
output_fn <- getOutputFilename next_phase
Temporary basename dflags next_phase (Just location)
- let extCore_filename = basename ++ ".hcr"
-
-- -fforce-recomp should also work with --make
let force_recomp = gopt Opt_ForceRecomp dflags
source_modified
@@ -205,7 +204,7 @@ compileOne' m_tc_result mHscMessage
hm_linkable = maybe_old_linkable })
_ -> do guts0 <- hscDesugar hsc_env summary tc_result
guts <- hscSimplify hsc_env guts0
- (iface, _changed, details, cgguts) <- hscNormalIface hsc_env extCore_filename guts mb_old_hash
+ (iface, _changed, details, cgguts) <- hscNormalIface hsc_env guts mb_old_hash
(hasStub, comp_bc, modBreaks) <- hscInteractive hsc_env cgguts summary
stub_o <- case hasStub of
@@ -229,7 +228,9 @@ compileOne' m_tc_result mHscMessage
hm_iface = iface,
hm_linkable = Just linkable })
HscNothing ->
- do (iface, _changed, details) <- hscSimpleIface hsc_env tc_result mb_old_hash
+ do (iface, changed, details) <- hscSimpleIface hsc_env tc_result mb_old_hash
+ when (gopt Opt_WriteInterface dflags) $
+ hscWriteIface dflags iface changed summary
let linkable = if isHsBoot src_flavour
then maybe_old_linkable
else Just (LM (ms_hs_date summary) this_mod [])
@@ -249,7 +250,7 @@ compileOne' m_tc_result mHscMessage
_ -> do guts0 <- hscDesugar hsc_env summary tc_result
guts <- hscSimplify hsc_env guts0
- (iface, changed, details, cgguts) <- hscNormalIface hsc_env extCore_filename guts mb_old_hash
+ (iface, changed, details, cgguts) <- hscNormalIface hsc_env guts mb_old_hash
hscWriteIface dflags iface changed summary
-- We're in --make mode: finish the compilation pipeline.
@@ -389,7 +390,7 @@ link' dflags batch_attempt_linking hpt
return Succeeded
-linkingNeeded :: DynFlags -> Bool -> [Linkable] -> [PackageId] -> IO Bool
+linkingNeeded :: DynFlags -> Bool -> [Linkable] -> [PackageKey] -> IO Bool
linkingNeeded dflags staticLink linkables pkg_deps = do
-- if the modification time on the executable is later than the
-- modification times on all of the objects and libraries, then omit
@@ -410,9 +411,8 @@ linkingNeeded dflags staticLink linkables pkg_deps = do
-- next, check libraries. XXX this only checks Haskell libraries,
-- not extra_libraries or -l things from the command line.
- let pkg_map = pkgIdMap (pkgState dflags)
- pkg_hslibs = [ (libraryDirs c, lib)
- | Just c <- map (lookupPackage pkg_map) pkg_deps,
+ let pkg_hslibs = [ (libraryDirs c, lib)
+ | Just c <- map (lookupPackage dflags) pkg_deps,
lib <- packageHsLibs dflags c ]
pkg_libfiles <- mapM (uncurry (findHSLib dflags)) pkg_hslibs
@@ -426,7 +426,7 @@ linkingNeeded dflags staticLink linkables pkg_deps = do
-- Returns 'False' if it was, and we can avoid linking, because the
-- previous binary was linked with "the same options".
-checkLinkInfo :: DynFlags -> [PackageId] -> FilePath -> IO Bool
+checkLinkInfo :: DynFlags -> [PackageKey] -> FilePath -> IO Bool
checkLinkInfo dflags pkg_deps exe_file
| not (platformSupportsSavingLinkOpts (platformOS (targetPlatform dflags)))
-- ToDo: Windows and OS X do not use the ELF binary format, so
@@ -496,8 +496,8 @@ compileFile hsc_env stop_phase (src, mb_phase) = do
| otherwise = Persistent
stop_phase' = case stop_phase of
- As | split -> SplitAs
- _ -> stop_phase
+ As _ | split -> SplitAs
+ _ -> stop_phase
( _, out_file) <- runPipeline stop_phase' hsc_env
(src, fmap RealPhase mb_phase) Nothing output
@@ -728,7 +728,7 @@ getOutputFilename stop_phase output basename dflags next_phase maybe_location
-- sometimes, we keep output from intermediate stages
keep_this_output =
case next_phase of
- As | keep_s -> True
+ As _ | keep_s -> True
LlvmOpt | keep_bc -> True
HCc | keep_hc -> True
_other -> False
@@ -890,16 +890,11 @@ runPhase (RealPhase (Hsc src_flavour)) input_fn dflags0
setDynFlags dflags
-- gather the imports and module name
- (hspp_buf,mod_name,imps,src_imps) <- liftIO $
- case src_flavour of
- ExtCoreFile -> do -- no explicit imports in ExtCore input.
- m <- getCoreModuleName input_fn
- return (Nothing, mkModuleName m, [], [])
-
- _ -> do
- buf <- hGetStringBuffer input_fn
- (src_imps,imps,L _ mod_name) <- getImports dflags buf input_fn (basename <.> suff)
- return (Just buf, mod_name, imps, src_imps)
+ (hspp_buf,mod_name,imps,src_imps) <- liftIO $ do
+ do
+ buf <- hGetStringBuffer input_fn
+ (src_imps,imps,L _ mod_name) <- getImports dflags buf input_fn (basename <.> suff)
+ return (Just buf, mod_name, imps, src_imps)
-- Take -o into account if present
-- Very like -ohi, but we must *only* do this if we aren't linking
@@ -934,8 +929,6 @@ runPhase (RealPhase (Hsc src_flavour)) input_fn dflags0
then return SourceUnmodified
else return SourceModified
- let extCore_filename = basename ++ ".hcr"
-
PipeState{hsc_env=hsc_env'} <- getPipeState
-- Tell the finder cache about this module
@@ -955,7 +948,7 @@ runPhase (RealPhase (Hsc src_flavour)) input_fn dflags0
ms_srcimps = src_imps }
-- run the compiler!
- result <- liftIO $ hscCompileOneShot hsc_env' extCore_filename
+ result <- liftIO $ hscCompileOneShot hsc_env'
mod_summary source_unchanged
return (HscOut src_flavour mod_name result,
@@ -1043,7 +1036,7 @@ runPhase (RealPhase cc_phase) input_fn dflags
-- files; this is the Value Add(TM) that using ghc instead of
-- gcc gives you :)
pkg_include_dirs <- liftIO $ getPackageIncludePath dflags pkgs
- let include_paths = foldr (\ x xs -> "-I" : x : xs) []
+ let include_paths = foldr (\ x xs -> ("-I" ++ x) : xs) []
(cmdline_include_paths ++ pkg_include_dirs)
let gcc_extra_viac_flags = extraGccViaCFlags dflags
@@ -1076,7 +1069,7 @@ runPhase (RealPhase cc_phase) input_fn dflags
| otherwise = []
-- Decide next phase
- let next_phase = As
+ let next_phase = As False
output_fn <- phaseOutputFilename next_phase
let
@@ -1119,7 +1112,7 @@ runPhase (RealPhase cc_phase) input_fn dflags
-- way we do the import depends on whether we're currently compiling
-- the base package or not.
++ (if platformOS platform == OSMinGW32 &&
- thisPackage dflags == basePackageId
+ thisPackage dflags == basePackageKey
then [ "-DCOMPILING_BASE_PACKAGE" ]
else [])
@@ -1188,7 +1181,7 @@ runPhase (RealPhase Splitter) input_fn dflags
-- As, SpitAs phase : Assembler
-- This is for calling the assembler on a regular assembly file (not split).
-runPhase (RealPhase As) input_fn dflags
+runPhase (RealPhase (As with_cpp)) input_fn dflags
= do
-- LLVM from version 3.0 onwards doesn't support the OS X system
-- assembler, so we use clang as the assembler instead. (#5636)
@@ -1214,6 +1207,7 @@ runPhase (RealPhase As) input_fn dflags
-- might be a hierarchical module.
liftIO $ createDirectoryIfMissing True (takeDirectory output_fn)
+ ccInfo <- liftIO $ getCompilerInfo dflags
let runAssembler inputFilename outputFilename
= liftIO $ as_prog dflags
([ SysTools.Option ("-I" ++ p) | p <- cmdline_include_paths ]
@@ -1228,8 +1222,13 @@ runPhase (RealPhase As) input_fn dflags
++ (if platformArch (targetPlatform dflags) == ArchSPARC
then [SysTools.Option "-mcpu=v9"]
else [])
-
- ++ [ SysTools.Option "-x", SysTools.Option "assembler-with-cpp"
+ ++ (if any (ccInfo ==) [Clang, AppleClang, AppleClang51]
+ then [SysTools.Option "-Qunused-arguments"]
+ else [])
+ ++ [ SysTools.Option "-x"
+ , if with_cpp
+ then SysTools.Option "assembler-with-cpp"
+ else SysTools.Option "assembler"
, SysTools.Option "-c"
, SysTools.FileOption "" inputFilename
, SysTools.Option "-o"
@@ -1254,6 +1253,7 @@ runPhase (RealPhase SplitAs) _input_fn dflags
osuf = objectSuf dflags
split_odir = base_o ++ "_" ++ osuf ++ "_split"
+ -- this also creates the hierarchy
liftIO $ createDirectoryIfMissing True split_odir
-- remove M_split/ *.o, because we're going to archive M_split/ *.o
@@ -1335,7 +1335,7 @@ runPhase (RealPhase LlvmOpt) input_fn dflags
-- passes only, so if the user is passing us extra options we assume
-- they know what they are doing and don't get in the way.
optFlag = if null (getOpts dflags opt_lo)
- then map SysTools.Option $ words (llvmOpts !! opt_lvl)
+ then map SysTools.Option $ words (llvmOpts ver !! opt_lvl)
else []
tbaa | ver < 29 = "" -- no tbaa in 2.8 and earlier
| gopt Opt_LlvmTBAA dflags = "--enable-tbaa=true"
@@ -1355,7 +1355,11 @@ runPhase (RealPhase LlvmOpt) input_fn dflags
where
-- we always (unless -optlo specified) run Opt since we rely on it to
-- fix up some pretty big deficiencies in the code we generate
- llvmOpts = ["-mem2reg -globalopt", "-O1", "-O2"]
+ llvmOpts ver = [ "-mem2reg -globalopt"
+ , if ver >= 34 then "-O1 -globalopt" else "-O1"
+ -- LLVM 3.4 -O1 doesn't eliminate aliases reliably (bug #8855)
+ , "-O2"
+ ]
-----------------------------------------------------------------------------
-- LlvmLlc phase
@@ -1379,7 +1383,7 @@ runPhase (RealPhase LlvmLlc) input_fn dflags
let next_phase = case gopt Opt_NoLlvmMangler dflags of
False -> LlvmMangle
True | gopt Opt_SplitObjs dflags -> Splitter
- True -> As
+ True -> As False
output_fn <- phaseOutputFilename next_phase
@@ -1448,7 +1452,7 @@ runPhase (RealPhase LlvmLlc) input_fn dflags
runPhase (RealPhase LlvmMangle) input_fn dflags
= do
- let next_phase = if gopt Opt_SplitObjs dflags then Splitter else As
+ let next_phase = if gopt Opt_SplitObjs dflags then Splitter else As False
output_fn <- phaseOutputFilename next_phase
liftIO $ llvmFixupAsm dflags input_fn output_fn
return (RealPhase next_phase, output_fn)
@@ -1460,6 +1464,7 @@ runPhase (RealPhase MergeStub) input_fn dflags
= do
PipeState{maybe_stub_o} <- getPipeState
output_fn <- phaseOutputFilename StopLn
+ liftIO $ createDirectoryIfMissing True (takeDirectory output_fn)
case maybe_stub_o of
Nothing ->
panic "runPhase(MergeStub): no stub"
@@ -1553,7 +1558,7 @@ mkExtraObj dflags extn xs
= do cFile <- newTempName dflags extn
oFile <- newTempName dflags "o"
writeFile cFile xs
- let rtsDetails = getPackageDetails (pkgState dflags) rtsPackageId
+ let rtsDetails = getPackageDetails dflags rtsPackageKey
SysTools.runCc dflags
([Option "-c",
FileOption "" cFile,
@@ -1602,7 +1607,7 @@ mkExtraObjToLinkIntoBinary dflags = do
-- this was included as inline assembly in the main.c file but this
-- is pretty fragile. gas gets upset trying to calculate relative offsets
-- that span the .note section (notably .text) when debug info is present
-mkNoteObjsToLinkIntoBinary :: DynFlags -> [PackageId] -> IO [FilePath]
+mkNoteObjsToLinkIntoBinary :: DynFlags -> [PackageKey] -> IO [FilePath]
mkNoteObjsToLinkIntoBinary dflags dep_packages = do
link_info <- getLinkInfo dflags dep_packages
@@ -1643,7 +1648,7 @@ mkNoteObjsToLinkIntoBinary dflags dep_packages = do
-- link. We save this information in the binary, and the next time we
-- link, if nothing else has changed, we use the link info stored in
-- the existing binary to decide whether to re-link or not.
-getLinkInfo :: DynFlags -> [PackageId] -> IO String
+getLinkInfo :: DynFlags -> [PackageKey] -> IO String
getLinkInfo dflags dep_packages = do
package_link_opts <- getPackageLinkOpts dflags dep_packages
pkg_frameworks <- if platformUsesFrameworks (targetPlatform dflags)
@@ -1721,13 +1726,13 @@ mk_pvm_wrapper_script pvm_executable pvm_executable_base sysMan = unlines $
-----------------------------------------------------------------------------
-- Look for the /* GHC_PACKAGES ... */ comment at the top of a .hc file
-getHCFilePackages :: FilePath -> IO [PackageId]
+getHCFilePackages :: FilePath -> IO [PackageKey]
getHCFilePackages filename =
Exception.bracket (openFile filename ReadMode) hClose $ \h -> do
l <- hGetLine h
case l of
'/':'*':' ':'G':'H':'C':'_':'P':'A':'C':'K':'A':'G':'E':'S':rest ->
- return (map stringToPackageId (words rest))
+ return (map stringToPackageKey (words rest))
_other ->
return []
@@ -1744,10 +1749,10 @@ getHCFilePackages filename =
-- read any interface files), so the user must explicitly specify all
-- the packages.
-linkBinary :: DynFlags -> [FilePath] -> [PackageId] -> IO ()
+linkBinary :: DynFlags -> [FilePath] -> [PackageKey] -> IO ()
linkBinary = linkBinary' False
-linkBinary' :: Bool -> DynFlags -> [FilePath] -> [PackageId] -> IO ()
+linkBinary' :: Bool -> DynFlags -> [FilePath] -> [PackageKey] -> IO ()
linkBinary' staticLink dflags o_files dep_packages = do
let platform = targetPlatform dflags
mySettings = settings dflags
@@ -1785,6 +1790,15 @@ linkBinary' staticLink dflags o_files dep_packages = do
then []
else ["-Wl,-rpath-link", "-Wl," ++ l]
in ["-L" ++ l] ++ rpathlink ++ rpath
+ | osMachOTarget (platformOS platform) &&
+ dynLibLoader dflags == SystemDependent &&
+ not (gopt Opt_Static dflags) &&
+ gopt Opt_RPath dflags
+ = let libpath = if gopt Opt_RelativeDynlibPaths dflags
+ then "@loader_path" >
+ (l `makeRelativeTo` full_output_fn)
+ else l
+ in ["-L" ++ l] ++ ["-Wl,-rpath", "-Wl," ++ libpath]
| otherwise = ["-L" ++ l]
let lib_paths = libraryPaths dflags
@@ -1858,7 +1872,7 @@ linkBinary' staticLink dflags o_files dep_packages = do
let os = platformOS (targetPlatform dflags)
in if os == OSOsf3 then ["-lpthread", "-lexc"]
else if os `elem` [OSMinGW32, OSFreeBSD, OSOpenBSD,
- OSNetBSD, OSHaiku, OSQNXNTO, OSiOS]
+ OSNetBSD, OSHaiku, OSQNXNTO, OSiOS, OSDarwin]
then []
else ["-lpthread"]
| otherwise = []
@@ -1920,13 +1934,6 @@ linkBinary' staticLink dflags o_files dep_packages = do
then ["-Wl,-read_only_relocs,suppress"]
else [])
- ++ (if platformOS platform == OSDarwin &&
- not staticLink &&
- not (gopt Opt_Static dflags) &&
- gopt Opt_RPath dflags
- then ["-Wl,-rpath","-Wl," ++ topDir dflags]
- else [])
-
++ o_files
++ lib_path_opts)
++ extra_ld_inputs
@@ -2019,7 +2026,7 @@ maybeCreateManifest dflags exe_filename
| otherwise = return []
-linkDynLibCheck :: DynFlags -> [String] -> [PackageId] -> IO ()
+linkDynLibCheck :: DynFlags -> [String] -> [PackageKey] -> IO ()
linkDynLibCheck dflags o_files dep_packages
= do
when (haveRtsOptsFlags dflags) $ do
@@ -2029,7 +2036,7 @@ linkDynLibCheck dflags o_files dep_packages
linkDynLib dflags o_files dep_packages
-linkStaticLibCheck :: DynFlags -> [String] -> [PackageId] -> IO ()
+linkStaticLibCheck :: DynFlags -> [String] -> [PackageKey] -> IO ()
linkStaticLibCheck dflags o_files dep_packages
= do
when (platformOS (targetPlatform dflags) `notElem` [OSiOS, OSDarwin]) $
@@ -2126,26 +2133,27 @@ joinObjectFiles dflags o_files output_fn = do
let mySettings = settings dflags
ldIsGnuLd = sLdIsGnuLd mySettings
osInfo = platformOS (targetPlatform dflags)
- ld_r args ccInfo = SysTools.runLink dflags ([
- SysTools.Option "-nostdlib",
- SysTools.Option "-Wl,-r"
- ]
- ++ (if ccInfo == Clang then []
- else [SysTools.Option "-nodefaultlibs"])
- ++ (if osInfo == OSFreeBSD
- then [SysTools.Option "-L/usr/lib"]
- else [])
- -- gcc on sparc sets -Wl,--relax implicitly, but
- -- -r and --relax are incompatible for ld, so
- -- disable --relax explicitly.
- ++ (if platformArch (targetPlatform dflags) == ArchSPARC
- && ldIsGnuLd
- then [SysTools.Option "-Wl,-no-relax"]
- else [])
- ++ map SysTools.Option ld_build_id
- ++ [ SysTools.Option "-o",
- SysTools.FileOption "" output_fn ]
- ++ args)
+ ld_r args cc = SysTools.runLink dflags ([
+ SysTools.Option "-nostdlib",
+ SysTools.Option "-Wl,-r"
+ ]
+ ++ (if any (cc ==) [Clang, AppleClang, AppleClang51]
+ then []
+ else [SysTools.Option "-nodefaultlibs"])
+ ++ (if osInfo == OSFreeBSD
+ then [SysTools.Option "-L/usr/lib"]
+ else [])
+ -- gcc on sparc sets -Wl,--relax implicitly, but
+ -- -r and --relax are incompatible for ld, so
+ -- disable --relax explicitly.
+ ++ (if platformArch (targetPlatform dflags) == ArchSPARC
+ && ldIsGnuLd
+ then [SysTools.Option "-Wl,-no-relax"]
+ else [])
+ ++ map SysTools.Option ld_build_id
+ ++ [ SysTools.Option "-o",
+ SysTools.FileOption "" output_fn ]
+ ++ args)
-- suppress the generation of the .note.gnu.build-id section,
-- which we don't need and sometimes causes ld to emit a
@@ -2157,7 +2165,9 @@ joinObjectFiles dflags o_files output_fn = do
if ldIsGnuLd
then do
script <- newTempName dflags "ldscript"
- writeFile script $ "INPUT(" ++ unwords o_files ++ ")"
+ cwd <- getCurrentDirectory
+ let o_files_abs = map (cwd >) o_files
+ writeFile script $ "INPUT(" ++ unwords o_files_abs ++ ")"
ld_r [SysTools.FileOption "" script] ccInfo
else if sLdSupportsFilelist mySettings
then do
@@ -2178,7 +2188,7 @@ hscPostBackendPhase dflags _ hsc_lang =
case hsc_lang of
HscC -> HCc
HscAsm | gopt Opt_SplitObjs dflags -> Splitter
- | otherwise -> As
+ | otherwise -> As False
HscLlvm -> LlvmOpt
HscNothing -> StopLn
HscInterpreted -> StopLn
diff --git a/compiler/main/DynFlags.hs b/compiler/main/DynFlags.hs
index 615fdbb08b95..c006480dd78d 100644
--- a/compiler/main/DynFlags.hs
+++ b/compiler/main/DynFlags.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-------------------------------------------------------------------------------
--
-- | Dynamic flags
@@ -11,7 +13,7 @@
--
-------------------------------------------------------------------------------
-{-# OPTIONS -fno-cse #-}
+{-# OPTIONS_GHC -fno-cse #-}
-- -fno-cse is needed for GLOBAL_VAR's to behave properly
module DynFlags (
@@ -30,6 +32,7 @@ module DynFlags (
wopt, wopt_set, wopt_unset,
xopt, xopt_set, xopt_unset,
lang_set,
+ useUnicodeSyntax,
whenGeneratingDynamicToo, ifGeneratingDynamicToo,
whenCannotGenerateDynamicToo,
dynamicTooMkDynamicDynFlags,
@@ -40,7 +43,7 @@ module DynFlags (
targetRetainsAllBindings,
GhcMode(..), isOneShot,
GhcLink(..), isNoLink,
- PackageFlag(..),
+ PackageFlag(..), PackageArg(..), ModRenaming,
PkgConfRef(..),
Option(..), showOpt,
DynLibLoader(..),
@@ -58,7 +61,7 @@ module DynFlags (
safeHaskellOn, safeImportsOn, safeLanguageOn, safeInferOn,
packageTrustOn,
safeDirectImpsReq, safeImplicitImpsReq,
- unsafeFlags,
+ unsafeFlags, unsafeFlagsForInfer,
-- ** System tool settings and locations
Settings(..),
@@ -87,7 +90,7 @@ module DynFlags (
getVerbFlags,
updOptLevel,
setTmpDir,
- setPackageName,
+ setPackageKey,
-- ** Parsing DynFlags
parseDynamicFlagsCmdLine,
@@ -187,6 +190,8 @@ import Data.Word
import System.FilePath
import System.IO
import System.IO.Error
+import Text.ParserCombinators.ReadP hiding (char)
+import Text.ParserCombinators.ReadP as R
import Data.IntSet (IntSet)
import qualified Data.IntSet as IntSet
@@ -240,6 +245,7 @@ data DumpFlag
| Opt_D_dump_spec
| Opt_D_dump_prep
| Opt_D_dump_stg
+ | Opt_D_dump_call_arity
| Opt_D_dump_stranal
| Opt_D_dump_strsigs
| Opt_D_dump_tc
@@ -265,8 +271,10 @@ data DumpFlag
| Opt_D_dump_hi
| Opt_D_dump_hi_diffs
| Opt_D_dump_mod_cycles
+ | Opt_D_dump_mod_map
| Opt_D_dump_view_pattern_commoning
| Opt_D_verbose_core2core
+ | Opt_D_dump_debug
deriving (Eq, Show, Enum)
@@ -288,6 +296,7 @@ data GeneralFlag
| Opt_PrintExplicitKinds
-- optimisation opts
+ | Opt_CallArity
| Opt_Strictness
| Opt_LateDmdAnal
| Opt_KillAbsence
@@ -328,6 +337,7 @@ data GeneralFlag
| Opt_IgnoreInterfacePragmas
| Opt_OmitInterfacePragmas
| Opt_ExposeAllUnfoldings
+ | Opt_WriteInterface -- forces .hi files to be written even with -fno-code
-- profiling opts
| Opt_AutoSccsOnIndividualCafs
@@ -382,6 +392,7 @@ data GeneralFlag
| Opt_ErrorSpans -- Include full span info in error messages,
-- instead of just the start position.
| Opt_PprCaseAsLet
+ | Opt_PprShowTicks
-- Suppress all coercions, them replacing with '...'
| Opt_SuppressCoercions
@@ -401,8 +412,6 @@ data GeneralFlag
| Opt_SuppressUniques
-- temporary flags
- | Opt_RunCPS
- | Opt_RunCPSZ
| Opt_AutoLinkPackages
| Opt_ImplicitImportQualified
@@ -420,6 +429,10 @@ data GeneralFlag
| Opt_DistrustAllPackages
| Opt_PackageTrust
+ -- debugging flags
+ | Opt_Debug
+ | Opt_DebugCore
+
deriving (Eq, Show, Enum)
data WarningFlag =
@@ -455,7 +468,6 @@ data WarningFlag =
| Opt_WarnTabs
| Opt_WarnUnrecognisedPragmas
| Opt_WarnDodgyForeignImports
- | Opt_WarnLazyUnliftedBindings
| Opt_WarnUnusedDoBind
| Opt_WarnWrongDoBind
| Opt_WarnAlternativeLayoutRuleTransitional
@@ -477,7 +489,6 @@ data SafeHaskellMode
| Sf_Unsafe
| Sf_Trustworthy
| Sf_Safe
- | Sf_SafeInferred
deriving (Eq)
instance Show SafeHaskellMode where
@@ -485,7 +496,6 @@ instance Show SafeHaskellMode where
show Sf_Unsafe = "Unsafe"
show Sf_Trustworthy = "Trustworthy"
show Sf_Safe = "Safe"
- show Sf_SafeInferred = "Safe-Inferred"
instance Outputable SafeHaskellMode where
ppr = text . show
@@ -579,6 +589,7 @@ data ExtensionFlag
| Opt_TraditionalRecordSyntax
| Opt_LambdaCase
| Opt_MultiWayIf
+ | Opt_BinaryLiterals
| Opt_NegativeLiterals
| Opt_EmptyCase
| Opt_PatternSynonyms
@@ -626,7 +637,7 @@ data DynFlags = DynFlags {
ctxtStkDepth :: Int, -- ^ Typechecker context stack depth
tyFunStkDepth :: Int, -- ^ Typechecker type function stack depth
- thisPackage :: PackageId, -- ^ name of package currently being compiled
+ thisPackage :: PackageKey, -- ^ name of package currently being compiled
-- ways
ways :: [Way], -- ^ Way flags from the command line
@@ -733,11 +744,14 @@ data DynFlags = DynFlags {
language :: Maybe Language,
-- | Safe Haskell mode
safeHaskell :: SafeHaskellMode,
+ safeInfer :: Bool,
+ safeInferred :: Bool,
-- We store the location of where some extension and flags were turned on so
-- we can produce accurate error messages when Safe Haskell fails due to
-- them.
thOnLoc :: SrcSpan,
newDerivOnLoc :: SrcSpan,
+ overlapInstLoc :: SrcSpan,
pkgTrustOnLoc :: SrcSpan,
warnSafeOnLoc :: SrcSpan,
warnUnsafeOnLoc :: SrcSpan,
@@ -773,7 +787,7 @@ data DynFlags = DynFlags {
pprCols :: Int,
traceLevel :: Int, -- Standard level is 1. Less verbose is 0.
- useUnicodeQuotes :: Bool,
+ useUnicode :: Bool,
-- | what kind of {-# SCC #-} to add automatically
profAuto :: ProfAuto,
@@ -797,8 +811,21 @@ data DynFlags = DynFlags {
rtldInfo :: IORef (Maybe LinkerInfo),
-- | Run-time compiler information
- rtccInfo :: IORef (Maybe CompilerInfo)
- }
+ rtccInfo :: IORef (Maybe CompilerInfo),
+
+ -- Constants used to control the amount of optimization done.
+
+ -- | Max size, in bytes, of inline array allocations.
+ maxInlineAllocSize :: Int,
+
+ -- | Only inline memcpy if it generates no more than this many
+ -- pseudo (roughly: Cmm) instructions.
+ maxInlineMemcpyInsns :: Int,
+
+ -- | Only inline memset if it generates no more than this many
+ -- pseudo (roughly: Cmm) instructions.
+ maxInlineMemsetInsns :: Int
+}
class HasDynFlags m where
getDynFlags :: m DynFlags
@@ -813,7 +840,7 @@ data ProfAuto
| ProfAutoTop -- ^ top-level functions annotated only
| ProfAutoExports -- ^ exported functions annotated only
| ProfAutoCalls -- ^ annotate call-sites
- deriving (Enum)
+ deriving (Eq,Enum)
data Settings = Settings {
sTargetPlatform :: Platform, -- Filled in by SysTools
@@ -1002,9 +1029,15 @@ isNoLink :: GhcLink -> Bool
isNoLink NoLink = True
isNoLink _ = False
+data PackageArg = PackageArg String
+ | PackageIdArg String
+ | PackageKeyArg String
+ deriving (Eq, Show)
+
+type ModRenaming = Maybe [(String, String)]
+
data PackageFlag
- = ExposePackage String
- | ExposePackageId String
+ = ExposePackage PackageArg ModRenaming
| HidePackage String
| IgnorePackage String
| TrustPackage String
@@ -1198,7 +1231,6 @@ wayOptl platform WayThreaded =
-- the problems are our fault or theirs, but it seems that using the
-- alternative 1:1 threading library libthr works around it:
OSFreeBSD -> ["-lthr"]
- OSSolaris2 -> ["-lrt"]
OSOpenBSD -> ["-pthread"]
OSNetBSD -> ["-pthread"]
_ -> []
@@ -1278,12 +1310,12 @@ initDynFlags dflags = do
refRtldInfo <- newIORef Nothing
refRtccInfo <- newIORef Nothing
wrapperNum <- newIORef emptyModuleEnv
- canUseUnicodeQuotes <- do let enc = localeEncoding
- str = "‛’"
- (withCString enc str $ \cstr ->
- do str' <- peekCString enc cstr
- return (str == str'))
- `catchIOError` \_ -> return False
+ canUseUnicode <- do let enc = localeEncoding
+ str = "‘’"
+ (withCString enc str $ \cstr ->
+ do str' <- peekCString enc cstr
+ return (str == str'))
+ `catchIOError` \_ -> return False
return dflags{
canGenerateDynamicToo = refCanGenerateDynamicToo,
nextTempSuffix = refNextTempSuffix,
@@ -1293,7 +1325,7 @@ initDynFlags dflags = do
generatedDumps = refGeneratedDumps,
llvmVersion = refLlvmVersion,
nextWrapperNum = wrapperNum,
- useUnicodeQuotes = canUseUnicodeQuotes,
+ useUnicode = canUseUnicode,
rtldInfo = refRtldInfo,
rtccInfo = refRtccInfo
}
@@ -1335,7 +1367,7 @@ defaultDynFlags mySettings =
ctxtStkDepth = mAX_CONTEXT_REDUCTION_DEPTH,
tyFunStkDepth = mAX_TYPE_FUNCTION_REDUCTION_DEPTH,
- thisPackage = mainPackageId,
+ thisPackage = mainPackageKey,
objectDir = Nothing,
dylibInstallName = Nothing,
@@ -1400,9 +1432,12 @@ defaultDynFlags mySettings =
warningFlags = IntSet.fromList (map fromEnum standardWarnings),
ghciScripts = [],
language = Nothing,
- safeHaskell = Sf_SafeInferred,
+ safeHaskell = Sf_None,
+ safeInfer = True,
+ safeInferred = True,
thOnLoc = noSrcSpan,
newDerivOnLoc = noSrcSpan,
+ overlapInstLoc = noSrcSpan,
pkgTrustOnLoc = noSrcSpan,
warnSafeOnLoc = noSrcSpan,
warnUnsafeOnLoc = noSrcSpan,
@@ -1432,7 +1467,7 @@ defaultDynFlags mySettings =
flushErr = defaultFlushErr,
pprUserLength = 5,
pprCols = 100,
- useUnicodeQuotes = False,
+ useUnicode = False,
traceLevel = 1,
profAuto = NoProfAuto,
llvmVersion = panic "defaultDynFlags: No llvmVersion",
@@ -1446,7 +1481,11 @@ defaultDynFlags mySettings =
avx512f = False,
avx512pf = False,
rtldInfo = panic "defaultDynFlags: no rtldInfo",
- rtccInfo = panic "defaultDynFlags: no rtccInfo"
+ rtccInfo = panic "defaultDynFlags: no rtccInfo",
+
+ maxInlineAllocSize = 128,
+ maxInlineMemcpyInsns = 32,
+ maxInlineMemsetInsns = 32
}
defaultWays :: Settings -> [Way]
@@ -1605,6 +1644,7 @@ dopt f dflags = (fromEnum f `IntSet.member` dumpFlags dflags)
enableIfVerbose Opt_D_dump_ticked = False
enableIfVerbose Opt_D_dump_view_pattern_commoning = False
enableIfVerbose Opt_D_dump_mod_cycles = False
+ enableIfVerbose Opt_D_dump_mod_map = False
enableIfVerbose _ = True
-- | Set a 'DumpFlag'
@@ -1664,6 +1704,9 @@ lang_set dflags lang =
extensionFlags = flattenExtensionFlags lang (extensions dflags)
}
+useUnicodeSyntax :: DynFlags -> Bool
+useUnicodeSyntax = xopt Opt_UnicodeSyntax
+
-- | Set the Haskell language standard to use
setLanguage :: Language -> DynP ()
setLanguage l = upd (`lang_set` Just l)
@@ -1678,7 +1721,7 @@ packageTrustOn = gopt Opt_PackageTrust
-- | Is Safe Haskell on in some way (including inference mode)
safeHaskellOn :: DynFlags -> Bool
-safeHaskellOn dflags = safeHaskell dflags /= Sf_None
+safeHaskellOn dflags = safeHaskell dflags /= Sf_None || safeInferOn dflags
-- | Is the Safe Haskell safe language in use
safeLanguageOn :: DynFlags -> Bool
@@ -1686,7 +1729,7 @@ safeLanguageOn dflags = safeHaskell dflags == Sf_Safe
-- | Is the Safe Haskell safe inference mode active
safeInferOn :: DynFlags -> Bool
-safeInferOn dflags = safeHaskell dflags == Sf_SafeInferred
+safeInferOn = safeInfer
-- | Test if Safe Imports are on in some form
safeImportsOn :: DynFlags -> Bool
@@ -1700,7 +1743,11 @@ setSafeHaskell s = updM f
where f dfs = do
let sf = safeHaskell dfs
safeM <- combineSafeFlags sf s
- return $ dfs { safeHaskell = safeM }
+ return $ case (s == Sf_Safe || s == Sf_Unsafe) of
+ True -> dfs { safeHaskell = safeM, safeInfer = False }
+ -- leave safe inferrence on in Trustworthy mode so we can warn
+ -- if it could have been inferred safe.
+ False -> dfs { safeHaskell = safeM }
-- | Are all direct imports required to be safe for this Safe Haskell mode?
-- Direct imports are when the code explicitly imports a module
@@ -1717,9 +1764,7 @@ safeImplicitImpsReq d = safeLanguageOn d
-- want to export this functionality from the module but do want to export the
-- type constructors.
combineSafeFlags :: SafeHaskellMode -> SafeHaskellMode -> DynP SafeHaskellMode
-combineSafeFlags a b | a == Sf_SafeInferred = return b
- | b == Sf_SafeInferred = return a
- | a == Sf_None = return b
+combineSafeFlags a b | a == Sf_None = return b
| b == Sf_None = return a
| a == b = return a
| otherwise = addErr errm >> return (panic errm)
@@ -1731,13 +1776,19 @@ combineSafeFlags a b | a == Sf_SafeInferred = return b
-- * function to get srcspan that enabled the flag
-- * function to test if the flag is on
-- * function to turn the flag off
-unsafeFlags :: [(String, DynFlags -> SrcSpan, DynFlags -> Bool, DynFlags -> DynFlags)]
+unsafeFlags, unsafeFlagsForInfer
+ :: [(String, DynFlags -> SrcSpan, DynFlags -> Bool, DynFlags -> DynFlags)]
unsafeFlags = [("-XGeneralizedNewtypeDeriving", newDerivOnLoc,
xopt Opt_GeneralizedNewtypeDeriving,
flip xopt_unset Opt_GeneralizedNewtypeDeriving),
("-XTemplateHaskell", thOnLoc,
xopt Opt_TemplateHaskell,
flip xopt_unset Opt_TemplateHaskell)]
+unsafeFlagsForInfer = unsafeFlags ++
+ -- TODO: Can we do better than this for inference?
+ [("-XOverlappingInstances", overlapInstLoc,
+ xopt Opt_OverlappingInstances,
+ flip xopt_unset Opt_OverlappingInstances)]
-- | Retrieve the options corresponding to a particular @opt_*@ field in the correct order
getOpts :: DynFlags -- ^ 'DynFlags' to retrieve the options from
@@ -2019,43 +2070,41 @@ updateWays dflags
-- The bool is to indicate if we are parsing command line flags (false means
-- file pragma). This allows us to generate better warnings.
safeFlagCheck :: Bool -> DynFlags -> (DynFlags, [Located String])
-safeFlagCheck _ dflags | not (safeLanguageOn dflags || safeInferOn dflags)
- = (dflags, [])
-
--- safe or safe-infer ON
-safeFlagCheck cmdl dflags =
- case safeLanguageOn dflags of
- True -> (dflags', warns)
+safeFlagCheck _ dflags | safeLanguageOn dflags = (dflagsUnset, warns)
+ where
+ -- Handle illegal flags under safe language.
+ (dflagsUnset, warns) = foldl check_method (dflags, []) unsafeFlags
- -- throw error if -fpackage-trust by itself with no safe haskell flag
- False | not cmdl && packageTrustOn dflags
- -> (gopt_unset dflags' Opt_PackageTrust,
- [L (pkgTrustOnLoc dflags') $
- "-fpackage-trust ignored;" ++
- " must be specified with a Safe Haskell flag"]
- )
+ check_method (df, warns) (str,loc,test,fix)
+ | test df = (fix df, warns ++ safeFailure (loc df) str)
+ | otherwise = (df, warns)
- False | null warns && safeInfOk
- -> (dflags', [])
+ safeFailure loc str
+ = [L loc $ str ++ " is not allowed in Safe Haskell; ignoring "
+ ++ str]
- | otherwise
- -> (dflags' { safeHaskell = Sf_None }, [])
- -- Have we inferred Unsafe?
- -- See Note [HscMain . Safe Haskell Inference]
- where
- -- TODO: Can we do better than this for inference?
- safeInfOk = not $ xopt Opt_OverlappingInstances dflags
+safeFlagCheck cmdl dflags =
+ case (safeInferOn dflags) of
+ True | safeFlags -> (dflags', warn)
+ True -> (dflags' { safeInferred = False }, warn)
+ False -> (dflags', warn)
- (dflags', warns) = foldl check_method (dflags, []) unsafeFlags
+ where
+ -- dynflags and warn for when -fpackage-trust by itself with no safe
+ -- haskell flag
+ (dflags', warn)
+ | safeHaskell dflags == Sf_None && not cmdl && packageTrustOn dflags
+ = (gopt_unset dflags Opt_PackageTrust, pkgWarnMsg)
+ | otherwise = (dflags, [])
- check_method (df, warns) (str,loc,test,fix)
- | test df = (apFix fix df, warns ++ safeFailure (loc dflags) str)
- | otherwise = (df, warns)
+ pkgWarnMsg = [L (pkgTrustOnLoc dflags') $
+ "-fpackage-trust ignored;" ++
+ " must be specified with a Safe Haskell flag"]
- apFix f = if safeInferOn dflags then id else f
+ safeFlags = all (\(_,_,t,_) -> not $ t dflags) unsafeFlagsForInfer
+ -- Have we inferred Unsafe?
+ -- See Note [HscMain . Safe Haskell Inference]
- safeFailure loc str
- = [L loc $ str ++ " is not allowed in Safe Haskell; ignoring " ++ str]
{- **********************************************************************
%* *
@@ -2144,7 +2193,6 @@ dynamic_flags = [
, Flag "pgmP" (hasArg setPgmP)
, Flag "pgmF" (hasArg (\f -> alterSettings (\s -> s { sPgm_F = f})))
, Flag "pgmc" (hasArg (\f -> alterSettings (\s -> s { sPgm_c = (f,[])})))
- , Flag "pgmm" (HasArg (\_ -> addWarn "The -pgmm flag does nothing; it will be removed in a future GHC release"))
, Flag "pgms" (hasArg (\f -> alterSettings (\s -> s { sPgm_s = (f,[])})))
, Flag "pgma" (hasArg (\f -> alterSettings (\s -> s { sPgm_a = (f,[])})))
, Flag "pgml" (hasArg (\f -> alterSettings (\s -> s { sPgm_l = (f,[])})))
@@ -2159,7 +2207,6 @@ dynamic_flags = [
, Flag "optP" (hasArg addOptP)
, Flag "optF" (hasArg (\f -> alterSettings (\s -> s { sOpt_F = f : sOpt_F s})))
, Flag "optc" (hasArg addOptc)
- , Flag "optm" (HasArg (\_ -> addWarn "The -optm flag does nothing; it will be removed in a future GHC release"))
, Flag "opta" (hasArg (\f -> alterSettings (\s -> s { sOpt_a = f : sOpt_a s})))
, Flag "optl" (hasArg addOptl)
, Flag "optwindres" (hasArg (\f -> alterSettings (\s -> s { sOpt_windres = f : sOpt_windres s})))
@@ -2171,16 +2218,9 @@ dynamic_flags = [
-------- ghc -M -----------------------------------------------------
, Flag "dep-suffix" (hasArg addDepSuffix)
- , Flag "optdep-s" (hasArgDF addDepSuffix "Use -dep-suffix instead")
, Flag "dep-makefile" (hasArg setDepMakefile)
- , Flag "optdep-f" (hasArgDF setDepMakefile "Use -dep-makefile instead")
- , Flag "optdep-w" (NoArg (deprecate "doesn't do anything"))
, Flag "include-pkg-deps" (noArg (setDepIncludePkgDeps True))
- , Flag "optdep--include-prelude" (noArgDF (setDepIncludePkgDeps True) "Use -include-pkg-deps instead")
- , Flag "optdep--include-pkg-deps" (noArgDF (setDepIncludePkgDeps True) "Use -include-pkg-deps instead")
, Flag "exclude-module" (hasArg addDepExcludeMod)
- , Flag "optdep--exclude-module" (hasArgDF addDepExcludeMod "Use -exclude-module instead")
- , Flag "optdep-x" (hasArgDF addDepExcludeMod "Use -exclude-module instead")
-------- Linking ----------------------------------------------------
, Flag "no-link" (noArg (\d -> d{ ghcLink=NoLink }))
@@ -2225,8 +2265,6 @@ dynamic_flags = [
, Flag "keep-hc-files" (NoArg (setGeneralFlag Opt_KeepHcFiles))
, Flag "keep-s-file" (NoArg (setGeneralFlag Opt_KeepSFiles))
, Flag "keep-s-files" (NoArg (setGeneralFlag Opt_KeepSFiles))
- , Flag "keep-raw-s-file" (NoArg (addWarn "The -keep-raw-s-file flag does nothing; it will be removed in a future GHC release"))
- , Flag "keep-raw-s-files" (NoArg (addWarn "The -keep-raw-s-files flag does nothing; it will be removed in a future GHC release"))
, Flag "keep-llvm-file" (NoArg (do setObjTarget HscLlvm
setGeneralFlag Opt_KeepLlvmFiles))
, Flag "keep-llvm-files" (NoArg (do setObjTarget HscLlvm
@@ -2322,6 +2360,7 @@ dynamic_flags = [
, Flag "ddump-spec" (setDumpFlag Opt_D_dump_spec)
, Flag "ddump-prep" (setDumpFlag Opt_D_dump_prep)
, Flag "ddump-stg" (setDumpFlag Opt_D_dump_stg)
+ , Flag "ddump-call-arity" (setDumpFlag Opt_D_dump_call_arity)
, Flag "ddump-stranal" (setDumpFlag Opt_D_dump_stranal)
, Flag "ddump-strsigs" (setDumpFlag Opt_D_dump_strsigs)
, Flag "ddump-tc" (setDumpFlag Opt_D_dump_tc)
@@ -2350,6 +2389,7 @@ dynamic_flags = [
, Flag "ddump-hpc" (setDumpFlag Opt_D_dump_ticked) -- back compat
, Flag "ddump-ticked" (setDumpFlag Opt_D_dump_ticked)
, Flag "ddump-mod-cycles" (setDumpFlag Opt_D_dump_mod_cycles)
+ , Flag "ddump-mod-map" (setDumpFlag Opt_D_dump_mod_map)
, Flag "ddump-view-pattern-commoning" (setDumpFlag Opt_D_dump_view_pattern_commoning)
, Flag "ddump-to-file" (NoArg (setGeneralFlag Opt_DumpToFile))
, Flag "ddump-hi-diffs" (setDumpFlag Opt_D_dump_hi_diffs)
@@ -2362,12 +2402,10 @@ dynamic_flags = [
setVerbosity $ Just 2))
, Flag "dfaststring-stats" (NoArg (setGeneralFlag Opt_D_faststring_stats))
, Flag "dno-llvm-mangler" (NoArg (setGeneralFlag Opt_NoLlvmMangler)) -- hidden flag
+ , Flag "ddump-debug" (setDumpFlag Opt_D_dump_debug)
------ Machine dependant (-m) stuff ---------------------------
- , Flag "monly-2-regs" (NoArg (addWarn "The -monly-2-regs flag does nothing; it will be removed in a future GHC release"))
- , Flag "monly-3-regs" (NoArg (addWarn "The -monly-3-regs flag does nothing; it will be removed in a future GHC release"))
- , Flag "monly-4-regs" (NoArg (addWarn "The -monly-4-regs flag does nothing; it will be removed in a future GHC release"))
, Flag "msse" (versionSuffix (\maj min d -> d{ sseVersion = Just (maj, min) }))
, Flag "mavx" (noArg (\d -> d{ avx = True }))
, Flag "mavx2" (noArg (\d -> d{ avx2 = True }))
@@ -2428,6 +2466,9 @@ dynamic_flags = [
, Flag "fmax-worker-args" (intSuffix (\n d -> d {maxWorkerArgs = n}))
, Flag "fghci-hist-size" (intSuffix (\n d -> d {ghciHistSize = n}))
+ , Flag "fmax-inline-alloc-size" (intSuffix (\n d -> d{ maxInlineAllocSize = n }))
+ , Flag "fmax-inline-memcpy-insns" (intSuffix (\n d -> d{ maxInlineMemcpyInsns = n }))
+ , Flag "fmax-inline-memset-insns" (intSuffix (\n d -> d{ maxInlineMemsetInsns = n }))
------ Profiling ----------------------------------------------------
@@ -2464,9 +2505,14 @@ dynamic_flags = [
------ Safe Haskell flags -------------------------------------------
, Flag "fpackage-trust" (NoArg setPackageTrust)
- , Flag "fno-safe-infer" (NoArg (setSafeHaskell Sf_None))
+ , Flag "fno-safe-infer" (noArg (\d -> d { safeInfer = False } ))
, Flag "fPIC" (NoArg (setGeneralFlag Opt_PIC))
, Flag "fno-PIC" (NoArg (unSetGeneralFlag Opt_PIC))
+
+ ------ Debugging flags ----------------------------------------------
+ , Flag "g" (NoArg (setGeneralFlag Opt_Debug))
+ , Flag "fsave-core" (NoArg (setGeneralFlag Opt_DebugCore))
+
]
++ map (mkFlag turnOn "" setGeneralFlag ) negatableFlags
++ map (mkFlag turnOff "no-" unSetGeneralFlag) negatableFlags
@@ -2503,9 +2549,13 @@ package_flags = [
removeUserPkgConf
deprecate "Use -no-user-package-db instead")
- , Flag "package-name" (hasArg setPackageName)
+ , Flag "package-name" (HasArg $ \name -> do
+ upd (setPackageKey name)
+ deprecate "Use -this-package-key instead")
+ , Flag "this-package-key" (hasArg setPackageKey)
, Flag "package-id" (HasArg exposePackageId)
, Flag "package" (HasArg exposePackage)
+ , Flag "package-key" (HasArg exposePackageKey)
, Flag "hide-package" (HasArg hidePackage)
, Flag "hide-all-packages" (NoArg (setGeneralFlag Opt_HideAllPackages))
, Flag "ignore-package" (HasArg ignorePackage)
@@ -2581,15 +2631,14 @@ fWarningFlags = [
( "warn-warnings-deprecations", Opt_WarnWarningsDeprecations, nop ),
( "warn-deprecations", Opt_WarnWarningsDeprecations, nop ),
( "warn-deprecated-flags", Opt_WarnDeprecatedFlags, nop ),
- ( "warn-amp", Opt_WarnAMP, nop ),
+ ( "warn-amp", Opt_WarnAMP,
+ \_ -> deprecate "it has no effect, and will be removed in GHC 7.12" ),
( "warn-orphans", Opt_WarnOrphans, nop ),
( "warn-identities", Opt_WarnIdentities, nop ),
( "warn-auto-orphans", Opt_WarnAutoOrphans, nop ),
( "warn-tabs", Opt_WarnTabs, nop ),
( "warn-typed-holes", Opt_WarnTypedHoles, nop ),
( "warn-unrecognised-pragmas", Opt_WarnUnrecognisedPragmas, nop ),
- ( "warn-lazy-unlifted-bindings", Opt_WarnLazyUnliftedBindings,
- \_ -> deprecate "it has no effect, and will be removed in GHC 7.10" ),
( "warn-unused-do-bind", Opt_WarnUnusedDoBind, nop ),
( "warn-wrong-do-bind", Opt_WarnWrongDoBind, nop ),
( "warn-alternative-layout-rule-transitional", Opt_WarnAlternativeLayoutRuleTransitional, nop ),
@@ -2615,7 +2664,8 @@ dFlags = [
( "suppress-idinfo", Opt_SuppressIdInfo, nop),
( "suppress-type-signatures", Opt_SuppressTypeSignatures, nop),
( "suppress-uniques", Opt_SuppressUniques, nop),
- ( "ppr-case-as-let", Opt_PprCaseAsLet, nop)]
+ ( "ppr-case-as-let", Opt_PprCaseAsLet, nop),
+ ( "ppr-ticks", Opt_PprShowTicks, nop)]
-- | These @-f\@ flags can all be reversed with @-fno-\@
fFlags :: [FlagSpec GeneralFlag]
@@ -2623,6 +2673,7 @@ fFlags = [
( "error-spans", Opt_ErrorSpans, nop ),
( "print-explicit-foralls", Opt_PrintExplicitForalls, nop ),
( "print-explicit-kinds", Opt_PrintExplicitKinds, nop ),
+ ( "call-arity", Opt_CallArity, nop ),
( "strictness", Opt_Strictness, nop ),
( "late-dmd-anal", Opt_LateDmdAnal, nop ),
( "specialise", Opt_Specialise, nop ),
@@ -2635,6 +2686,7 @@ fFlags = [
( "pedantic-bottoms", Opt_PedanticBottoms, nop ),
( "ignore-interface-pragmas", Opt_IgnoreInterfacePragmas, nop ),
( "omit-interface-pragmas", Opt_OmitInterfacePragmas, nop ),
+ ( "write-interface", Opt_WriteInterface, nop ),
( "expose-all-unfoldings", Opt_ExposeAllUnfoldings, nop ),
( "do-lambda-eta-expansion", Opt_DoLambdaEtaExpansion, nop ),
( "ignore-asserts", Opt_IgnoreAsserts, nop ),
@@ -2654,8 +2706,6 @@ fFlags = [
( "break-on-error", Opt_BreakOnError, nop ),
( "print-evld-with-show", Opt_PrintEvldWithShow, nop ),
( "print-bind-contents", Opt_PrintBindContents, nop ),
- ( "run-cps", Opt_RunCPS, nop ),
- ( "run-cpsz", Opt_RunCPSZ, nop ),
( "vectorise", Opt_Vectorise, nop ),
( "vectorisation-avoidance", Opt_VectorisationAvoidance, nop ),
( "regs-graph", Opt_RegsGraph, nop ),
@@ -2670,7 +2720,8 @@ fFlags = [
( "fun-to-thunk", Opt_FunToThunk, nop ),
( "gen-manifest", Opt_GenManifest, nop ),
( "embed-manifest", Opt_EmbedManifest, nop ),
- ( "ext-core", Opt_EmitExternalCore, nop ),
+ ( "ext-core", Opt_EmitExternalCore,
+ \_ -> deprecate "it has no effect, and will be removed in GHC 7.12" ),
( "shared-implib", Opt_SharedImplib, nop ),
( "ghci-sandbox", Opt_GhciSandbox, nop ),
( "ghci-history", Opt_GhciHistory, nop ),
@@ -2854,13 +2905,17 @@ xFlags = [
( "FlexibleInstances", Opt_FlexibleInstances, nop ),
( "ConstrainedClassMethods", Opt_ConstrainedClassMethods, nop ),
( "MultiParamTypeClasses", Opt_MultiParamTypeClasses, nop ),
- ( "NullaryTypeClasses", Opt_NullaryTypeClasses, nop ),
+ ( "NullaryTypeClasses", Opt_NullaryTypeClasses,
+ deprecatedForExtension "MultiParamTypeClasses" ),
( "FunctionalDependencies", Opt_FunctionalDependencies, nop ),
( "GeneralizedNewtypeDeriving", Opt_GeneralizedNewtypeDeriving, setGenDeriving ),
- ( "OverlappingInstances", Opt_OverlappingInstances, nop ),
+ ( "OverlappingInstances", Opt_OverlappingInstances,
+ \ turn_on -> when turn_on
+ $ deprecate "instead use per-instance pragmas OVERLAPPING/OVERLAPPABLE/OVERLAPS" ),
( "UndecidableInstances", Opt_UndecidableInstances, nop ),
( "IncoherentInstances", Opt_IncoherentInstances, nop ),
( "PackageImports", Opt_PackageImports, nop ),
+ ( "BinaryLiterals", Opt_BinaryLiterals, nop ),
( "NegativeLiterals", Opt_NegativeLiterals, nop ),
( "EmptyCase", Opt_EmptyCase, nop ),
( "PatternSynonyms", Opt_PatternSynonyms, nop )
@@ -2945,6 +3000,9 @@ impliedFlags
, (Opt_ImplicitParams, turnOn, Opt_FlexibleInstances)
, (Opt_JavaScriptFFI, turnOn, Opt_InterruptibleFFI)
+
+ , (Opt_DeriveTraversable, turnOn, Opt_DeriveFunctor)
+ , (Opt_DeriveTraversable, turnOn, Opt_DeriveFoldable)
]
optLevelFlags :: [([Int], GeneralFlag)]
@@ -2957,6 +3015,7 @@ optLevelFlags
-- in PrelRules
, ([1,2], Opt_DoEtaReduction)
, ([1,2], Opt_CaseMerge)
+ , ([1,2], Opt_CallArity)
, ([1,2], Opt_Strictness)
, ([1,2], Opt_CSE)
, ([1,2], Opt_FullLaziness)
@@ -3001,7 +3060,6 @@ standardWarnings
= [ Opt_WarnOverlappingPatterns,
Opt_WarnWarningsDeprecations,
Opt_WarnDeprecatedFlags,
- Opt_WarnAMP,
Opt_WarnTypedHoles,
Opt_WarnUnrecognisedPragmas,
Opt_WarnPointlessPragmas,
@@ -3172,16 +3230,9 @@ noArg fn = NoArg (upd fn)
noArgM :: (DynFlags -> DynP DynFlags) -> OptKind (CmdLineP DynFlags)
noArgM fn = NoArg (updM fn)
-noArgDF :: (DynFlags -> DynFlags) -> String -> OptKind (CmdLineP DynFlags)
-noArgDF fn deprec = NoArg (upd fn >> deprecate deprec)
-
hasArg :: (String -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags)
hasArg fn = HasArg (upd . fn)
-hasArgDF :: (String -> DynFlags -> DynFlags) -> String -> OptKind (CmdLineP DynFlags)
-hasArgDF fn deprec = HasArg (\s -> do upd (fn s)
- deprecate deprec)
-
sepArg :: (String -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags)
sepArg fn = SepArg (upd . fn)
@@ -3315,11 +3366,39 @@ removeGlobalPkgConf = upd $ \s -> s { extraPkgConfs = filter isNotGlobal . extra
clearPkgConf :: DynP ()
clearPkgConf = upd $ \s -> s { extraPkgConfs = const [] }
-exposePackage, exposePackageId, hidePackage, ignorePackage,
+parsePackageFlag :: (String -> PackageArg) -- type of argument
+ -> String -- string to parse
+ -> PackageFlag
+parsePackageFlag constr str = case filter ((=="").snd) (readP_to_S parse str) of
+ [(r, "")] -> r
+ _ -> throwGhcException $ CmdLineError ("Can't parse package flag: " ++ str)
+ where parse = do
+ pkg <- munch1 (\c -> isAlphaNum c || c `elem` ":-_.")
+ (do _ <- tok $ R.char '('
+ rns <- tok $ sepBy parseItem (tok $ R.char ',')
+ _ <- tok $ R.char ')'
+ return (ExposePackage (constr pkg) (Just rns))
+ +++
+ return (ExposePackage (constr pkg) Nothing))
+ parseMod = munch1 (\c -> isAlphaNum c || c `elem` ".")
+ parseItem = do
+ orig <- tok $ parseMod
+ (do _ <- tok $ string "as"
+ new <- tok $ parseMod
+ return (orig, new)
+ +++
+ return (orig, orig))
+ tok m = skipSpaces >> m
+
+exposePackage, exposePackageId, exposePackageKey, hidePackage, ignorePackage,
trustPackage, distrustPackage :: String -> DynP ()
exposePackage p = upd (exposePackage' p)
exposePackageId p =
- upd (\s -> s{ packageFlags = ExposePackageId p : packageFlags s })
+ upd (\s -> s{ packageFlags =
+ parsePackageFlag PackageIdArg p : packageFlags s })
+exposePackageKey p =
+ upd (\s -> s{ packageFlags =
+ parsePackageFlag PackageKeyArg p : packageFlags s })
hidePackage p =
upd (\s -> s{ packageFlags = HidePackage p : packageFlags s })
ignorePackage p =
@@ -3331,10 +3410,11 @@ distrustPackage p = exposePackage p >>
exposePackage' :: String -> DynFlags -> DynFlags
exposePackage' p dflags
- = dflags { packageFlags = ExposePackage p : packageFlags dflags }
+ = dflags { packageFlags =
+ parsePackageFlag PackageArg p : packageFlags dflags }
-setPackageName :: String -> DynFlags -> DynFlags
-setPackageName p s = s{ thisPackage = stringToPackageId p }
+setPackageKey :: String -> DynFlags -> DynFlags
+setPackageKey p s = s{ thisPackage = stringToPackageKey p }
-- If we're linking a binary, then only targets that produce object
-- code are allowed (requests for other target types are ignored).
@@ -3386,10 +3466,10 @@ setMainIs arg
| not (null main_fn) && isLower (head main_fn)
-- The arg looked like "Foo.Bar.baz"
= upd $ \d -> d{ mainFunIs = Just main_fn,
- mainModIs = mkModule mainPackageId (mkModuleName main_mod) }
+ mainModIs = mkModule mainPackageKey (mkModuleName main_mod) }
| isUpper (head arg) -- The arg looked like "Foo" or "Foo.Bar"
- = upd $ \d -> d{ mainModIs = mkModule mainPackageId (mkModuleName arg) }
+ = upd $ \d -> d{ mainModIs = mkModule mainPackageKey (mkModuleName arg) }
| otherwise -- The arg looked like "baz"
= upd $ \d -> d{ mainFunIs = Just arg }
@@ -3523,10 +3603,10 @@ picCCOpts dflags
-- Don't generate "common" symbols - these are unwanted
-- in dynamic libraries.
- | gopt Opt_PIC dflags -> ["-fno-common", "-U __PIC__", "-D__PIC__"]
+ | gopt Opt_PIC dflags -> ["-fno-common", "-U__PIC__", "-D__PIC__"]
| otherwise -> ["-mdynamic-no-pic"]
OSMinGW32 -- no -fPIC for Windows
- | gopt Opt_PIC dflags -> ["-U __PIC__", "-D__PIC__"]
+ | gopt Opt_PIC dflags -> ["-U__PIC__", "-D__PIC__"]
| otherwise -> []
_
-- we need -fPIC for C files when we are compiling with -dynamic,
@@ -3535,12 +3615,12 @@ picCCOpts dflags
-- objects, but can't without -fPIC. See
-- http://ghc.haskell.org/trac/ghc/wiki/Commentary/PositionIndependentCode
| gopt Opt_PIC dflags || not (gopt Opt_Static dflags) ->
- ["-fPIC", "-U __PIC__", "-D__PIC__"]
+ ["-fPIC", "-U__PIC__", "-D__PIC__"]
| otherwise -> []
picPOpts :: DynFlags -> [String]
picPOpts dflags
- | gopt Opt_PIC dflags = ["-U __PIC__", "-D__PIC__"]
+ | gopt Opt_PIC dflags = ["-U__PIC__", "-D__PIC__"]
| otherwise = []
-- -----------------------------------------------------------------------------
@@ -3576,6 +3656,8 @@ compilerInfo dflags
("RTS ways", cGhcRTSWays),
("Support dynamic-too", if isWindows then "NO" else "YES"),
("Support parallel --make", "YES"),
+ ("Support reexported-modules", "YES"),
+ ("Uses package keys", "YES"),
("Dynamic by default", if dYNAMIC_BY_DEFAULT dflags
then "YES" else "NO"),
("GHC Dynamic", if dynamicGhc
@@ -3749,6 +3831,8 @@ data LinkerInfo
data CompilerInfo
= GCC
| Clang
+ | AppleClang
+ | AppleClang51
| UnknownCC
deriving Eq
diff --git a/compiler/main/DynFlags.hs-boot b/compiler/main/DynFlags.hs-boot
index 04ec5a4e7dd3..5cf21669bd10 100644
--- a/compiler/main/DynFlags.hs-boot
+++ b/compiler/main/DynFlags.hs-boot
@@ -9,4 +9,5 @@ targetPlatform :: DynFlags -> Platform
pprUserLength :: DynFlags -> Int
pprCols :: DynFlags -> Int
unsafeGlobalDynFlags :: DynFlags
-useUnicodeQuotes :: DynFlags -> Bool
+useUnicode :: DynFlags -> Bool
+useUnicodeSyntax :: DynFlags -> Bool
diff --git a/compiler/main/DynamicLoading.hs b/compiler/main/DynamicLoading.hs
index ffafc7821672..046d13cee57e 100644
--- a/compiler/main/DynamicLoading.hs
+++ b/compiler/main/DynamicLoading.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, MagicHash #-}
+
-- | Dynamically lookup up values from modules and loading them.
module DynamicLoading (
#ifdef GHCI
diff --git a/compiler/main/ErrUtils.lhs b/compiler/main/ErrUtils.lhs
index 12b6bad68aad..c43064e7f1e7 100644
--- a/compiler/main/ErrUtils.lhs
+++ b/compiler/main/ErrUtils.lhs
@@ -4,17 +4,21 @@
\section[ErrsUtils]{Utilities for error reporting}
\begin{code}
+{-# LANGUAGE CPP #-}
module ErrUtils (
+ MsgDoc,
+ Validity(..), andValid, allValid, isValid, getInvalids,
+
ErrMsg, WarnMsg, Severity(..),
Messages, ErrorMessages, WarningMessages,
errMsgSpan, errMsgContext, errMsgShortDoc, errMsgExtraInfo,
- MsgDoc, mkLocMessage, pprMessageBag, pprErrMsgBag, pprErrMsgBagWithLoc,
+ mkLocMessage, pprMessageBag, pprErrMsgBag, pprErrMsgBagWithLoc,
pprLocErrMsg, makeIntoWarning,
-
+
errorsFound, emptyMessages, isEmptyMessages,
mkErrMsg, mkPlainErrMsg, mkLongErrMsg, mkWarnMsg, mkPlainWarnMsg,
- printBagOfErrors,
+ printBagOfErrors,
warnIsErrorMsg, mkLongWarnMsg,
ghcExit,
@@ -45,7 +49,7 @@ import DynFlags
import System.Directory
import System.Exit ( ExitCode(..), exitWith )
-import System.FilePath
+import System.FilePath ( takeDirectory, (>) )
import Data.List
import qualified Data.Set as Set
import Data.IORef
@@ -55,6 +59,29 @@ import Control.Monad
import Control.Monad.IO.Class
import System.IO
+-------------------------
+type MsgDoc = SDoc
+
+-------------------------
+data Validity
+ = IsValid -- Everything is fine
+ | NotValid MsgDoc -- A problem, and some indication of why
+
+isValid :: Validity -> Bool
+isValid IsValid = True
+isValid (NotValid {}) = False
+
+andValid :: Validity -> Validity -> Validity
+andValid IsValid v = v
+andValid v _ = v
+
+allValid :: [Validity] -> Validity -- If they aren't all valid, return the first
+allValid [] = IsValid
+allValid (v : vs) = v `andValid` allValid vs
+
+getInvalids :: [Validity] -> [MsgDoc]
+getInvalids vs = [d | NotValid d <- vs]
+
-- -----------------------------------------------------------------------------
-- Basic error messages: just render a message with a source location.
@@ -73,7 +100,6 @@ data ErrMsg = ErrMsg {
-- The SrcSpan is used for sorting errors into line-number order
type WarnMsg = ErrMsg
-type MsgDoc = SDoc
data Severity
= SevOutput
diff --git a/compiler/main/Finder.lhs b/compiler/main/Finder.lhs
index 60683b2289ea..f9c7e2eee02a 100644
--- a/compiler/main/Finder.lhs
+++ b/compiler/main/Finder.lhs
@@ -4,6 +4,8 @@
\section[Finder]{Module Finder}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module Finder (
flushFinderCaches,
FindResult(..),
@@ -41,13 +43,12 @@ import Maybes ( expectJust )
import Exception ( evaluate )
import Distribution.Text
-import Distribution.Package hiding (PackageId)
import Data.IORef ( IORef, writeIORef, readIORef, atomicModifyIORef )
import System.Directory
import System.FilePath
import Control.Monad
-import Data.List ( partition )
import Data.Time
+import Data.List ( foldl' )
type FileExt = String -- Filename extension
@@ -78,12 +79,12 @@ flushFinderCaches hsc_env = do
fc_ref = hsc_FC hsc_env
mlc_ref = hsc_MLC hsc_env
-flushModLocationCache :: PackageId -> IORef ModLocationCache -> IO ()
+flushModLocationCache :: PackageKey -> IORef ModLocationCache -> IO ()
flushModLocationCache this_pkg ref = do
atomicModifyIORef ref $ \fm -> (filterModuleEnv is_ext fm, ())
_ <- evaluate =<< readIORef ref
return ()
- where is_ext mod _ | modulePackageId mod /= this_pkg = True
+ where is_ext mod _ | modulePackageKey mod /= this_pkg = True
| otherwise = False
addToFinderCache :: IORef FinderCache -> ModuleName -> FindResult -> IO ()
@@ -146,7 +147,7 @@ findImportedModule hsc_env mod_name mb_pkg =
findExactModule :: HscEnv -> Module -> IO FindResult
findExactModule hsc_env mod =
let dflags = hsc_dflags hsc_env
- in if modulePackageId mod == thisPackage dflags
+ in if modulePackageKey mod == thisPackage dflags
then findHomeModule hsc_env (moduleName mod)
else findPackageModule hsc_env mod
@@ -188,41 +189,21 @@ homeSearchCache hsc_env mod_name do_this = do
findExposedPackageModule :: HscEnv -> ModuleName -> Maybe FastString
-> IO FindResult
findExposedPackageModule hsc_env mod_name mb_pkg
- -- not found in any package:
- = case lookupModuleWithSuggestions (hsc_dflags hsc_env) mod_name of
- Left suggest -> return (NotFound { fr_paths = [], fr_pkg = Nothing
- , fr_pkgs_hidden = []
- , fr_mods_hidden = []
- , fr_suggestions = suggest })
- Right found
- | null found_exposed -- Found, but with no exposed copies
- -> return (NotFound { fr_paths = [], fr_pkg = Nothing
- , fr_pkgs_hidden = pkg_hiddens
- , fr_mods_hidden = mod_hiddens
- , fr_suggestions = [] })
-
- | [(pkg_conf,_)] <- found_exposed -- Found uniquely
- -> let pkgid = packageConfigId pkg_conf in
- findPackageModule_ hsc_env (mkModule pkgid mod_name) pkg_conf
-
- | otherwise -- Found in more than one place
- -> return (FoundMultiple (map (packageConfigId.fst) found_exposed))
- where
- for_this_pkg = case mb_pkg of
- Nothing -> found
- Just p -> filter ((`matches` p) . fst) found
- found_exposed = filter is_exposed for_this_pkg
- is_exposed (pkg_conf,exposed_mod) = exposed pkg_conf && exposed_mod
-
- mod_hiddens = [ packageConfigId pkg_conf
- | (pkg_conf,False) <- found ]
-
- pkg_hiddens = [ packageConfigId pkg_conf
- | (pkg_conf,_) <- found, not (exposed pkg_conf) ]
-
- pkg_conf `matches` pkg
- = case packageName pkg_conf of
- PackageName n -> pkg == mkFastString n
+ = case lookupModuleWithSuggestions (hsc_dflags hsc_env) mod_name mb_pkg of
+ LookupFound m pkg_conf ->
+ findPackageModule_ hsc_env m pkg_conf
+ LookupMultiple rs ->
+ return (FoundMultiple rs)
+ LookupHidden pkg_hiddens mod_hiddens ->
+ return (NotFound{ fr_paths = [], fr_pkg = Nothing
+ , fr_pkgs_hidden = map (modulePackageKey.fst) pkg_hiddens
+ , fr_mods_hidden = map (modulePackageKey.fst) mod_hiddens
+ , fr_suggestions = [] })
+ LookupNotFound suggest ->
+ return (NotFound{ fr_paths = [], fr_pkg = Nothing
+ , fr_pkgs_hidden = []
+ , fr_mods_hidden = []
+ , fr_suggestions = suggest })
modLocationCache :: HscEnv -> Module -> IO FindResult -> IO FindResult
modLocationCache hsc_env mod do_this = do
@@ -293,15 +274,22 @@ findPackageModule :: HscEnv -> Module -> IO FindResult
findPackageModule hsc_env mod = do
let
dflags = hsc_dflags hsc_env
- pkg_id = modulePackageId mod
- pkg_map = pkgIdMap (pkgState dflags)
+ pkg_id = modulePackageKey mod
--
- case lookupPackage pkg_map pkg_id of
+ case lookupPackage dflags pkg_id of
Nothing -> return (NoPackage pkg_id)
Just pkg_conf -> findPackageModule_ hsc_env mod pkg_conf
+-- | Look up the interface file associated with module @mod@. This function
+-- requires a few invariants to be upheld: (1) the 'Module' in question must
+-- be the module identifier of the *original* implementation of a module,
+-- not a reexport (this invariant is upheld by @Packages.lhs@) and (2)
+-- the 'PackageConfig' must be consistent with the package key in the 'Module'.
+-- The redundancy is to avoid an extra lookup in the package state
+-- for the appropriate config.
findPackageModule_ :: HscEnv -> Module -> PackageConfig -> IO FindResult
findPackageModule_ hsc_env mod pkg_conf =
+ ASSERT( modulePackageKey mod == packageConfigId pkg_conf )
modLocationCache hsc_env mod $
-- special case for GHC.Prim; we won't find it in the filesystem.
@@ -371,7 +359,7 @@ searchPathExts paths mod exts
]
search [] = return (NotFound { fr_paths = map fst to_search
- , fr_pkg = Just (modulePackageId mod)
+ , fr_pkg = Just (modulePackageKey mod)
, fr_mods_hidden = [], fr_pkgs_hidden = []
, fr_suggestions = [] })
@@ -432,8 +420,8 @@ mkHomeModLocation2 :: DynFlags
mkHomeModLocation2 dflags mod src_basename ext = do
let mod_basename = moduleNameSlashes mod
- obj_fn <- mkObjPath dflags src_basename mod_basename
- hi_fn <- mkHiPath dflags src_basename mod_basename
+ obj_fn = mkObjPath dflags src_basename mod_basename
+ hi_fn = mkHiPath dflags src_basename mod_basename
return (ModLocation{ ml_hs_file = Just (src_basename <.> ext),
ml_hi_file = hi_fn,
@@ -443,7 +431,7 @@ mkHiOnlyModLocation :: DynFlags -> Suffix -> FilePath -> String
-> IO ModLocation
mkHiOnlyModLocation dflags hisuf path basename
= do let full_basename = path > basename
- obj_fn <- mkObjPath dflags full_basename basename
+ obj_fn = mkObjPath dflags full_basename basename
return ModLocation{ ml_hs_file = Nothing,
ml_hi_file = full_basename <.> hisuf,
-- Remove the .hi-boot suffix from
@@ -459,16 +447,15 @@ mkObjPath
:: DynFlags
-> FilePath -- the filename of the source file, minus the extension
-> String -- the module name with dots replaced by slashes
- -> IO FilePath
-mkObjPath dflags basename mod_basename
- = do let
+ -> FilePath
+mkObjPath dflags basename mod_basename = obj_basename <.> osuf
+ where
odir = objectDir dflags
osuf = objectSuf dflags
obj_basename | Just dir <- odir = dir > mod_basename
| otherwise = basename
- return (obj_basename <.> osuf)
-- | Constructs the filename of a .hi file for a given source file.
-- Does /not/ check whether the .hi file exists
@@ -476,16 +463,15 @@ mkHiPath
:: DynFlags
-> FilePath -- the filename of the source file, minus the extension
-> String -- the module name with dots replaced by slashes
- -> IO FilePath
-mkHiPath dflags basename mod_basename
- = do let
+ -> FilePath
+mkHiPath dflags basename mod_basename = hi_basename <.> hisuf
+ where
hidir = hiDir dflags
hisuf = hiSuf dflags
hi_basename | Just dir <- hidir = dir > mod_basename
| otherwise = basename
- return (hi_basename <.> hisuf)
-- -----------------------------------------------------------------------------
@@ -548,18 +534,38 @@ cannotFindInterface = cantFindErr (sLit "Failed to load interface for")
cantFindErr :: LitString -> LitString -> DynFlags -> ModuleName -> FindResult
-> SDoc
-cantFindErr _ multiple_found _ mod_name (FoundMultiple pkgs)
+cantFindErr _ multiple_found _ mod_name (FoundMultiple mods)
+ | Just pkgs <- unambiguousPackages
= hang (ptext multiple_found <+> quotes (ppr mod_name) <> colon) 2 (
sep [ptext (sLit "it was found in multiple packages:"),
- hsep (map (text.packageIdString) pkgs)]
+ hsep (map ppr pkgs) ]
)
+ | otherwise
+ = hang (ptext multiple_found <+> quotes (ppr mod_name) <> colon) 2 (
+ vcat (map pprMod mods)
+ )
+ where
+ unambiguousPackages = foldl' unambiguousPackage (Just []) mods
+ unambiguousPackage (Just xs) (m, ModOrigin (Just _) _ _ _)
+ = Just (modulePackageKey m : xs)
+ unambiguousPackage _ _ = Nothing
+
+ pprMod (m, o) = ptext (sLit "it is bound as") <+> ppr m <+>
+ ptext (sLit "by") <+> pprOrigin m o
+ pprOrigin _ ModHidden = panic "cantFindErr: bound by mod hidden"
+ pprOrigin m (ModOrigin e res _ f) = sep $ punctuate comma (
+ if e == Just True
+ then [ptext (sLit "package") <+> ppr (modulePackageKey m)]
+ else [] ++
+ map ((ptext (sLit "a reexport in package") <+>)
+ .ppr.packageConfigId) res ++
+ if f then [ptext (sLit "a package flag")] else []
+ )
+
cantFindErr cannot_find _ dflags mod_name find_result
= ptext cannot_find <+> quotes (ppr mod_name)
$$ more_info
where
- pkg_map :: PackageConfigMap
- pkg_map = pkgIdMap (pkgState dflags)
-
more_info
= case find_result of
NoPackage pkg
@@ -615,7 +621,7 @@ cantFindErr cannot_find _ dflags mod_name find_result
<> dot $$ cabal_pkg_hidden_hint pkg
cabal_pkg_hidden_hint pkg
| gopt Opt_BuildingCabalPackage dflags
- = case simpleParse (packageIdString pkg) of
+ = case simpleParse (packageKeyString pkg) of
Just pid ->
ptext (sLit "Perhaps you need to add") <+>
quotes (text (display (pkgName pid))) <+>
@@ -626,22 +632,40 @@ cantFindErr cannot_find _ dflags mod_name find_result
mod_hidden pkg =
ptext (sLit "it is a hidden module in the package") <+> quotes (ppr pkg)
- pp_suggestions :: [Module] -> SDoc
+ pp_suggestions :: [ModuleSuggestion] -> SDoc
pp_suggestions sugs
| null sugs = empty
| otherwise = hang (ptext (sLit "Perhaps you meant"))
- 2 (vcat [ vcat (map pp_exp exposed_sugs)
- , vcat (map pp_hid hidden_sugs) ])
- where
- (exposed_sugs, hidden_sugs) = partition from_exposed_pkg sugs
-
- from_exposed_pkg m = case lookupPackage pkg_map (modulePackageId m) of
- Just pkg_config -> exposed pkg_config
- Nothing -> WARN( True, ppr m ) -- Should not happen
- False
-
- pp_exp mod = ppr (moduleName mod)
- <+> parens (ptext (sLit "from") <+> ppr (modulePackageId mod))
- pp_hid mod = ppr (moduleName mod)
- <+> parens (ptext (sLit "needs flag -package") <+> ppr (modulePackageId mod))
+ 2 (vcat (map pp_sugg sugs))
+
+ -- NB: Prefer the *original* location, and then reexports, and then
+ -- package flags when making suggestions. ToDo: if the original package
+ -- also has a reexport, prefer that one
+ pp_sugg (SuggestVisible m mod o) = ppr m <+> provenance o
+ where provenance ModHidden = empty
+ provenance (ModOrigin{ fromOrigPackage = e,
+ fromExposedReexport = res,
+ fromPackageFlag = f })
+ | Just True <- e
+ = parens (ptext (sLit "from") <+> ppr (modulePackageKey mod))
+ | f && moduleName mod == m
+ = parens (ptext (sLit "from") <+> ppr (modulePackageKey mod))
+ | (pkg:_) <- res
+ = parens (ptext (sLit "from") <+> ppr (packageConfigId pkg)
+ <> comma <+> ptext (sLit "reexporting") <+> ppr mod)
+ | f
+ = parens (ptext (sLit "defined via package flags to be")
+ <+> ppr mod)
+ | otherwise = empty
+ pp_sugg (SuggestHidden m mod o) = ppr m <+> provenance o
+ where provenance ModHidden = empty
+ provenance (ModOrigin{ fromOrigPackage = e,
+ fromHiddenReexport = rhs })
+ | Just False <- e
+ = parens (ptext (sLit "needs flag -package-key")
+ <+> ppr (modulePackageKey mod))
+ | (pkg:_) <- rhs
+ = parens (ptext (sLit "needs flag -package-key")
+ <+> ppr (packageConfigId pkg))
+ | otherwise = empty
\end{code}
diff --git a/compiler/main/GHC.hs b/compiler/main/GHC.hs
index 6b2815a4eeb8..9ab52ebf1d87 100644
--- a/compiler/main/GHC.hs
+++ b/compiler/main/GHC.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, NondecreasingIndentation, ScopedTypeVariables #-}
+
-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow, 2005-2012
@@ -53,7 +55,6 @@ module GHC (
-- ** Compiling to Core
CoreModule(..),
compileToCoreModule, compileToCoreSimplified,
- compileCoreToObj,
-- * Inspecting the module structure of the program
ModuleGraph, ModSummary(..), ms_mod_name, ModLocation(..),
@@ -80,7 +81,7 @@ module GHC (
SafeHaskellMode(..),
-- * Querying the environment
- packageDbModules,
+ -- packageDbModules,
-- * Printing
PrintUnqualified, alwaysQualify,
@@ -102,6 +103,7 @@ module GHC (
parseName,
RunResult(..),
runStmt, runStmtWithLocation, runDecls, runDeclsWithLocation,
+ runTcInteractive, -- Desired by some clients (Trac #8878)
parseImportDecl, SingleStep(..),
resume,
Resume(resumeStmt, resumeThreadId, resumeBreakInfo, resumeSpan,
@@ -131,10 +133,10 @@ module GHC (
-- * Abstract syntax elements
-- ** Packages
- PackageId,
+ PackageKey,
-- ** Modules
- Module, mkModule, pprModule, moduleName, modulePackageId,
+ Module, mkModule, pprModule, moduleName, modulePackageKey,
ModuleName, mkModuleName, moduleNameString,
-- ** Names
@@ -257,8 +259,10 @@ module GHC (
import ByteCodeInstr
import BreakArray
import InteractiveEval
+import TcRnDriver ( runTcInteractive )
#endif
+import PprTyThing ( pprFamInst )
import HscMain
import GhcMake
import DriverPipeline ( compileOne' )
@@ -281,7 +285,7 @@ import DataCon
import Name hiding ( varName )
import Avail
import InstEnv
-import FamInstEnv
+import FamInstEnv ( FamInst )
import SrcLoc
import CoreSyn
import TidyPgm
@@ -295,6 +299,7 @@ import Annotations
import Module
import UniqFM
import Panic
+import Platform
import Bag ( unitBag )
import ErrUtils
import MonadUtils
@@ -307,7 +312,7 @@ import FastString
import qualified Parser
import Lexer
-import System.Directory ( doesFileExist, getCurrentDirectory )
+import System.Directory ( doesFileExist )
import Data.Maybe
import Data.List ( find )
import Data.Time
@@ -441,7 +446,7 @@ runGhcT mb_top_dir ghct = do
-- reside. More precisely, this should be the output of @ghc --print-libdir@
-- of the version of GHC the module using this API is compiled with. For
-- portability, you should use the @ghc-paths@ package, available at
--- .
+-- .
initGhcMonad :: GhcMonad m => Maybe FilePath -> m ()
initGhcMonad mb_top_dir
@@ -450,12 +455,45 @@ initGhcMonad mb_top_dir
; initStaticOpts
; mySettings <- initSysTools mb_top_dir
; dflags <- initDynFlags (defaultDynFlags mySettings)
+ ; checkBrokenTablesNextToCode dflags
; setUnsafeGlobalDynFlags dflags
-- c.f. DynFlags.parseDynamicFlagsFull, which
-- creates DynFlags and sets the UnsafeGlobalDynFlags
; newHscEnv dflags }
; setSession env }
+-- | The binutils linker on ARM emits unnecessary R_ARM_COPY relocations which
+-- breaks tables-next-to-code in dynamically linked modules. This
+-- check should be more selective but there is currently no released
+-- version where this bug is fixed.
+-- See https://sourceware.org/bugzilla/show_bug.cgi?id=16177 and
+-- https://ghc.haskell.org/trac/ghc/ticket/4210#comment:29
+checkBrokenTablesNextToCode :: MonadIO m => DynFlags -> m ()
+checkBrokenTablesNextToCode dflags
+ = do { broken <- checkBrokenTablesNextToCode' dflags
+ ; when broken
+ $ do { _ <- liftIO $ throwIO $ mkApiErr dflags invalidLdErr
+ ; fail "unsupported linker"
+ }
+ }
+ where
+ invalidLdErr = text "Tables-next-to-code not supported on ARM" <+>
+ text "when using binutils ld (please see:" <+>
+ text "https://sourceware.org/bugzilla/show_bug.cgi?id=16177)"
+
+checkBrokenTablesNextToCode' :: MonadIO m => DynFlags -> m Bool
+checkBrokenTablesNextToCode' dflags
+ | not (isARM arch) = return False
+ | WayDyn `notElem` ways dflags = return False
+ | not (tablesNextToCode dflags) = return False
+ | otherwise = do
+ linkerInfo <- liftIO $ getLinkerInfo dflags
+ case linkerInfo of
+ GnuLD _ -> return True
+ _ -> return False
+ where platform = targetPlatform dflags
+ arch = platformArch platform
+
-- %************************************************************************
-- %* *
@@ -496,7 +534,7 @@ initGhcMonad mb_top_dir
-- flags. If you are not doing linking or doing static linking, you
-- can ignore the list of packages returned.
--
-setSessionDynFlags :: GhcMonad m => DynFlags -> m [PackageId]
+setSessionDynFlags :: GhcMonad m => DynFlags -> m [PackageKey]
setSessionDynFlags dflags = do
(dflags', preload) <- liftIO $ initPackages dflags
modifySession $ \h -> h{ hsc_dflags = dflags'
@@ -505,7 +543,7 @@ setSessionDynFlags dflags = do
return preload
-- | Sets the program 'DynFlags'.
-setProgramDynFlags :: GhcMonad m => DynFlags -> m [PackageId]
+setProgramDynFlags :: GhcMonad m => DynFlags -> m [PackageKey]
setProgramDynFlags dflags = do
(dflags', preload) <- liftIO $ initPackages dflags
modifySession $ \h -> h{ hsc_dflags = dflags' }
@@ -889,43 +927,6 @@ compileToCoreModule = compileCore False
-- as to return simplified and tidied Core.
compileToCoreSimplified :: GhcMonad m => FilePath -> m CoreModule
compileToCoreSimplified = compileCore True
--- | Takes a CoreModule and compiles the bindings therein
--- to object code. The first argument is a bool flag indicating
--- whether to run the simplifier.
--- The resulting .o, .hi, and executable files, if any, are stored in the
--- current directory, and named according to the module name.
--- This has only so far been tested with a single self-contained module.
-compileCoreToObj :: GhcMonad m
- => Bool -> CoreModule -> FilePath -> FilePath -> m ()
-compileCoreToObj simplify cm@(CoreModule{ cm_module = mName })
- output_fn extCore_filename = do
- dflags <- getSessionDynFlags
- currentTime <- liftIO $ getCurrentTime
- cwd <- liftIO $ getCurrentDirectory
- modLocation <- liftIO $ mkHiOnlyModLocation dflags (hiSuf dflags) cwd
- ((moduleNameSlashes . moduleName) mName)
-
- let modSum = ModSummary { ms_mod = mName,
- ms_hsc_src = ExtCoreFile,
- ms_location = modLocation,
- -- By setting the object file timestamp to Nothing,
- -- we always force recompilation, which is what we
- -- want. (Thus it doesn't matter what the timestamp
- -- for the (nonexistent) source file is.)
- ms_hs_date = currentTime,
- ms_obj_date = Nothing,
- -- Only handling the single-module case for now, so no imports.
- ms_srcimps = [],
- ms_textual_imps = [],
- -- No source file
- ms_hspp_file = "",
- ms_hspp_opts = dflags,
- ms_hspp_buf = Nothing
- }
-
- hsc_env <- getSession
- liftIO $ hscCompileCore hsc_env simplify (cm_safe cm) modSum (cm_binds cm) output_fn extCore_filename
-
compileCore :: GhcMonad m => Bool -> FilePath -> m CoreModule
compileCore simplify fn = do
@@ -1166,9 +1167,10 @@ getGRE = withSession $ \hsc_env-> return $ ic_rn_gbl_env (hsc_IC hsc_env)
-- -----------------------------------------------------------------------------
+{- ToDo: Move the primary logic here to compiler/main/Packages.lhs
-- | Return all /external/ modules available in the package database.
-- Modules from the current session (i.e., from the 'HomePackageTable') are
--- not included.
+-- not included. This includes module names which are reexported by packages.
packageDbModules :: GhcMonad m =>
Bool -- ^ Only consider exposed packages.
-> m [Module]
@@ -1176,10 +1178,13 @@ packageDbModules only_exposed = do
dflags <- getSessionDynFlags
let pkgs = eltsUFM (pkgIdMap (pkgState dflags))
return $
- [ mkModule pid modname | p <- pkgs
- , not only_exposed || exposed p
- , let pid = packageConfigId p
- , modname <- exposedModules p ]
+ [ mkModule pid modname
+ | p <- pkgs
+ , not only_exposed || exposed p
+ , let pid = packageConfigId p
+ , modname <- exposedModules p
+ ++ map exportName (reexportedModules p) ]
+ -}
-- -----------------------------------------------------------------------------
-- Misc exported utils
@@ -1300,7 +1305,7 @@ showRichTokenStream ts = go startLoc ts ""
-- -----------------------------------------------------------------------------
-- Interactive evaluation
--- | Takes a 'ModuleName' and possibly a 'PackageId', and consults the
+-- | Takes a 'ModuleName' and possibly a 'PackageKey', and consults the
-- filesystem and package database to find the corresponding 'Module',
-- using the algorithm that is used for an @import@ declaration.
findModule :: GhcMonad m => ModuleName -> Maybe FastString -> m Module
@@ -1310,7 +1315,7 @@ findModule mod_name maybe_pkg = withSession $ \hsc_env -> do
this_pkg = thisPackage dflags
--
case maybe_pkg of
- Just pkg | fsToPackageId pkg /= this_pkg && pkg /= fsLit "this" -> liftIO $ do
+ Just pkg | fsToPackageKey pkg /= this_pkg && pkg /= fsLit "this" -> liftIO $ do
res <- findImportedModule hsc_env mod_name maybe_pkg
case res of
Found _ m -> return m
@@ -1322,7 +1327,7 @@ findModule mod_name maybe_pkg = withSession $ \hsc_env -> do
Nothing -> liftIO $ do
res <- findImportedModule hsc_env mod_name maybe_pkg
case res of
- Found loc m | modulePackageId m /= this_pkg -> return m
+ Found loc m | modulePackageKey m /= this_pkg -> return m
| otherwise -> modNotLoadedError dflags m loc
err -> throwOneError $ noModError dflags noSrcSpan mod_name err
@@ -1367,7 +1372,7 @@ isModuleTrusted m = withSession $ \hsc_env ->
liftIO $ hscCheckSafe hsc_env m noSrcSpan
-- | Return if a module is trusted and the pkgs it depends on to be trusted.
-moduleTrustReqs :: GhcMonad m => Module -> m (Bool, [PackageId])
+moduleTrustReqs :: GhcMonad m => Module -> m (Bool, [PackageKey])
moduleTrustReqs m = withSession $ \hsc_env ->
liftIO $ hscGetSafe hsc_env m noSrcSpan
diff --git a/compiler/main/GhcMake.hs b/compiler/main/GhcMake.hs
index c8afd83beb1d..0c63203d4c63 100644
--- a/compiler/main/GhcMake.hs
+++ b/compiler/main/GhcMake.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE BangPatterns, CPP, NondecreasingIndentation, ScopedTypeVariables #-}
{-# OPTIONS_GHC -fno-warn-warnings-deprecations #-}
-- NB: we specifically ignore deprecations. GHC 7.6 marks the .QSem module as
-- deprecated, although it became un-deprecated later. As a result, using 7.6
@@ -46,7 +46,7 @@ import BasicTypes
import Digraph
import Exception ( tryIO, gbracket, gfinally )
import FastString
-import Maybes ( expectJust, mapCatMaybes )
+import Maybes ( expectJust )
import MonadUtils ( allM, MonadIO )
import Outputable
import Panic
@@ -63,6 +63,7 @@ import qualified Data.Set as Set
import qualified FiniteMap as Map ( insertListWith )
import Control.Concurrent ( forkIOWithUnmask, killThread )
+import qualified GHC.Conc as CC
import Control.Concurrent.MVar
import Control.Concurrent.QSem
import Control.Exception
@@ -80,6 +81,11 @@ import System.IO.Error ( isDoesNotExistError )
import GHC.Conc ( getNumProcessors, getNumCapabilities, setNumCapabilities )
+label_self :: String -> IO ()
+label_self thread_name = do
+ self_tid <- CC.myThreadId
+ CC.labelThread self_tid thread_name
+
-- -----------------------------------------------------------------------------
-- Loading the program
@@ -744,10 +750,18 @@ parUpsweep n_jobs old_hpt stable_mods cleanup sccs = do
| ((ms,mvar,_),idx) <- comp_graph_w_idx ]
+ liftIO $ label_self "main --make thread"
-- For each module in the module graph, spawn a worker thread that will
-- compile this module.
let { spawnWorkers = forM comp_graph_w_idx $ \((mod,!mvar,!log_queue),!mod_idx) ->
forkIOWithUnmask $ \unmask -> do
+ liftIO $ label_self $ unwords
+ [ "worker --make thread"
+ , "for module"
+ , show (moduleNameString (ms_mod_name mod))
+ , "number"
+ , show mod_idx
+ ]
-- Replace the default log_action with one that writes each
-- message to the module's log_queue. The main thread will
-- deal with synchronously printing these messages.
@@ -1443,7 +1457,7 @@ moduleGraphNodes drop_hs_boot_nodes summaries = (graphFromEdgedVertices nodes, l
| otherwise = HsBootFile
out_edge_keys :: HscSource -> [ModuleName] -> [Int]
- out_edge_keys hi_boot ms = mapCatMaybes (lookup_key hi_boot) ms
+ out_edge_keys hi_boot ms = mapMaybe (lookup_key hi_boot) ms
-- If we want keep_hi_boot_nodes, then we do lookup_key with
-- the IsBootInterface parameter True; else False
@@ -1786,7 +1800,7 @@ summariseModule hsc_env old_summary_map is_boot (L loc wanted_mod)
just_found location mod
| otherwise ->
-- Drop external-pkg
- ASSERT(modulePackageId mod /= thisPackage dflags)
+ ASSERT(modulePackageKey mod /= thisPackage dflags)
return Nothing
err -> return $ Just $ Left $ noModError dflags loc wanted_mod err
diff --git a/compiler/main/GhcMonad.hs b/compiler/main/GhcMonad.hs
index 68b4e2b2a272..5fa6452d587f 100644
--- a/compiler/main/GhcMonad.hs
+++ b/compiler/main/GhcMonad.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE RankNTypes #-}
{-# OPTIONS_GHC -funbox-strict-fields #-}
-- -----------------------------------------------------------------------------
--
diff --git a/compiler/main/HeaderInfo.hs b/compiler/main/HeaderInfo.hs
index a083f4fcd8c8..fcf235bd23f9 100644
--- a/compiler/main/HeaderInfo.hs
+++ b/compiler/main/HeaderInfo.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- | Parsing the top of a Haskell source file to get its module name,
@@ -185,8 +187,8 @@ lazyGetToks dflags filename handle = do
-- large module names (#5981)
nextbuf <- hGetStringBufferBlock handle new_size
if (len nextbuf == 0) then lazyLexBuf handle state True new_size else do
- newbuf <- appendStringBuffers (buffer state) nextbuf
- unsafeInterleaveIO $ lazyLexBuf handle state{buffer=newbuf} False new_size
+ newbuf <- appendStringBuffers (buffer state) nextbuf
+ unsafeInterleaveIO $ lazyLexBuf handle state{buffer=newbuf} False new_size
getToks :: DynFlags -> FilePath -> StringBuffer -> [Located Token]
diff --git a/compiler/main/Hooks.lhs b/compiler/main/Hooks.lhs
index 3bd9643dc66f..63aaafa2a7f5 100644
--- a/compiler/main/Hooks.lhs
+++ b/compiler/main/Hooks.lhs
@@ -63,7 +63,7 @@ data Hooks = Hooks
, tcForeignImportsHook :: Maybe ([LForeignDecl Name] -> TcM ([Id], [LForeignDecl Id], Bag GlobalRdrElt))
, tcForeignExportsHook :: Maybe ([LForeignDecl Name] -> TcM (LHsBinds TcId, [LForeignDecl TcId], Bag GlobalRdrElt))
, hscFrontendHook :: Maybe (ModSummary -> Hsc TcGblEnv)
- , hscCompileOneShotHook :: Maybe (HscEnv -> FilePath -> ModSummary -> SourceModified -> IO HscStatus)
+ , hscCompileOneShotHook :: Maybe (HscEnv -> ModSummary -> SourceModified -> IO HscStatus)
, hscCompileCoreExprHook :: Maybe (HscEnv -> SrcSpan -> CoreExpr -> IO HValue)
, ghcPrimIfaceHook :: Maybe ModIface
, runPhaseHook :: Maybe (PhasePlus -> FilePath -> DynFlags -> CompPipeline (PhasePlus, FilePath))
diff --git a/compiler/main/HscMain.hs b/compiler/main/HscMain.hs
index 04b0823db41c..d0a50d8e1378 100644
--- a/compiler/main/HscMain.hs
+++ b/compiler/main/HscMain.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE BangPatterns, CPP, MagicHash, NondecreasingIndentation #-}
+
-------------------------------------------------------------------------------
--
-- | Main API for compiling plain Haskell source code.
@@ -146,7 +148,6 @@ import ErrUtils
import Outputable
import HscStats ( ppSourceStats )
import HscTypes
-import MkExternalCore ( emitExternalCore )
import FastString
import UniqFM ( emptyUFM )
import UniqSupply
@@ -406,19 +407,20 @@ tcRnModule' hsc_env sum save_rn_syntax mod = do
tcSafeOK <- liftIO $ readIORef (tcg_safeInfer tcg_res)
dflags <- getDynFlags
+ let allSafeOK = safeInferred dflags && tcSafeOK
- -- end of the Safe Haskell line, how to respond to user?
- if not (safeHaskellOn dflags) || (safeInferOn dflags && not tcSafeOK)
- -- if safe haskell off or safe infer failed, wipe trust
- then wipeTrust tcg_res emptyBag
+ -- end of the safe haskell line, how to respond to user?
+ if not (safeHaskellOn dflags) || (safeInferOn dflags && not allSafeOK)
+ -- if safe Haskell off or safe infer failed, mark unsafe
+ then markUnsafe tcg_res emptyBag
- -- module safe, throw warning if needed
+ -- module (could be) safe, throw warning if needed
else do
tcg_res' <- hscCheckSafeImports tcg_res
safe <- liftIO $ readIORef (tcg_safeInfer tcg_res')
when (safe && wopt Opt_WarnSafe dflags)
- (logWarnings $ unitBag $
- mkPlainWarnMsg dflags (warnSafeOnLoc dflags) $ errSafe tcg_res')
+ (logWarnings $ unitBag $ mkPlainWarnMsg dflags
+ (warnSafeOnLoc dflags) $ errSafe tcg_res')
return tcg_res'
where
pprMod t = ppr $ moduleName $ tcg_mod t
@@ -516,8 +518,9 @@ genericHscCompileGetFrontendResult ::
-> (Int,Int) -- (i,n) = module i of n (for msgs)
-> IO (Either ModIface (TcGblEnv, Maybe Fingerprint))
-genericHscCompileGetFrontendResult always_do_basic_recompilation_check m_tc_result
- mHscMessage hsc_env mod_summary source_modified mb_old_iface mod_index
+genericHscCompileGetFrontendResult
+ always_do_basic_recompilation_check m_tc_result
+ mHscMessage hsc_env mod_summary source_modified mb_old_iface mod_index
= do
let msg what = case mHscMessage of
@@ -553,16 +556,19 @@ genericHscCompileGetFrontendResult always_do_basic_recompilation_check m_tc_resu
case mb_checked_iface of
Just iface | not (recompileRequired recomp_reqd) ->
- -- If the module used TH splices when it was last compiled,
- -- then the recompilation check is not accurate enough (#481)
- -- and we must ignore it. However, if the module is stable
- -- (none of the modules it depends on, directly or indirectly,
- -- changed), then we *can* skip recompilation. This is why
- -- the SourceModified type contains SourceUnmodifiedAndStable,
- -- and it's pretty important: otherwise ghc --make would
- -- always recompile TH modules, even if nothing at all has
- -- changed. Stability is just the same check that make is
- -- doing for us in one-shot mode.
+ -- If the module used TH splices when it was last
+ -- compiled, then the recompilation check is not
+ -- accurate enough (#481) and we must ignore
+ -- it. However, if the module is stable (none of
+ -- the modules it depends on, directly or
+ -- indirectly, changed), then we *can* skip
+ -- recompilation. This is why the SourceModified
+ -- type contains SourceUnmodifiedAndStable, and
+ -- it's pretty important: otherwise ghc --make
+ -- would always recompile TH modules, even if
+ -- nothing at all has changed. Stability is just
+ -- the same check that make is doing for us in
+ -- one-shot mode.
case m_tc_result of
Nothing
| mi_used_th iface && not stable ->
@@ -580,31 +586,25 @@ genericHscFrontend mod_summary =
getHooked hscFrontendHook genericHscFrontend' >>= ($ mod_summary)
genericHscFrontend' :: ModSummary -> Hsc TcGblEnv
-genericHscFrontend' mod_summary
- | ExtCoreFile <- ms_hsc_src mod_summary =
- panic "GHC does not currently support reading External Core files"
- | otherwise =
- hscFileFrontEnd mod_summary
+genericHscFrontend' mod_summary = hscFileFrontEnd mod_summary
--------------------------------------------------------------
-- Compilers
--------------------------------------------------------------
hscCompileOneShot :: HscEnv
- -> FilePath
-> ModSummary
-> SourceModified
-> IO HscStatus
hscCompileOneShot env =
lookupHook hscCompileOneShotHook hscCompileOneShot' (hsc_dflags env) env
--- Compile Haskell, boot and extCore in OneShot mode.
+-- Compile Haskell/boot in OneShot mode.
hscCompileOneShot' :: HscEnv
- -> FilePath
-> ModSummary
-> SourceModified
-> IO HscStatus
-hscCompileOneShot' hsc_env extCore_filename mod_summary src_changed
+hscCompileOneShot' hsc_env mod_summary src_changed
= do
-- One-shot mode needs a knot-tying mutable variable for interface
-- files. See TcRnTypes.TcGblEnv.tcg_type_env_var.
@@ -624,7 +624,11 @@ hscCompileOneShot' hsc_env extCore_filename mod_summary src_changed
guts0 <- hscDesugar' (ms_location mod_summary) tc_result
dflags <- getDynFlags
case hscTarget dflags of
- HscNothing -> return HscNotGeneratingCode
+ HscNothing -> do
+ when (gopt Opt_WriteInterface dflags) $ liftIO $ do
+ (iface, changed, _details) <- hscSimpleIface hsc_env tc_result mb_old_hash
+ hscWriteIface dflags iface changed mod_summary
+ return HscNotGeneratingCode
_ ->
case ms_hsc_src mod_summary of
HsBootFile ->
@@ -633,7 +637,7 @@ hscCompileOneShot' hsc_env extCore_filename mod_summary src_changed
return HscUpdateBoot
_ ->
do guts <- hscSimplify' guts0
- (iface, changed, _details, cgguts) <- hscNormalIface' extCore_filename guts mb_old_hash
+ (iface, changed, _details, cgguts) <- hscNormalIface' guts mb_old_hash
liftIO $ hscWriteIface dflags iface changed mod_summary
return $ HscRecomp cgguts mod_summary
@@ -770,16 +774,15 @@ hscCheckSafeImports tcg_env = do
tcg_env' <- checkSafeImports dflags tcg_env
case safeLanguageOn dflags of
True -> do
- -- we nuke user written RULES in -XSafe
+ -- XSafe: we nuke user written RULES
logWarnings $ warns dflags (tcg_rules tcg_env')
return tcg_env' { tcg_rules = [] }
False
- -- user defined RULES, so not safe or already unsafe
- | safeInferOn dflags && not (null $ tcg_rules tcg_env') ||
- safeHaskell dflags == Sf_None
- -> wipeTrust tcg_env' $ warns dflags (tcg_rules tcg_env')
+ -- SafeInferred: user defined RULES, so not safe
+ | safeInferOn dflags && not (null $ tcg_rules tcg_env')
+ -> markUnsafe tcg_env' $ warns dflags (tcg_rules tcg_env')
- -- trustworthy OR safe inferred with no RULES
+ -- Trustworthy OR SafeInferred: with no RULES
| otherwise
-> return tcg_env'
@@ -825,7 +828,7 @@ checkSafeImports dflags tcg_env
True ->
-- did we fail safe inference or fail -XSafe?
case safeInferOn dflags of
- True -> wipeTrust tcg_env errs
+ True -> markUnsafe tcg_env errs
False -> liftIO . throwIO . mkSrcErr $ errs
-- All good matey!
@@ -839,14 +842,16 @@ checkSafeImports dflags tcg_env
imp_info = tcg_imports tcg_env -- ImportAvails
imports = imp_mods imp_info -- ImportedMods
imports' = moduleEnvToList imports -- (Module, [ImportedModsVal])
- pkg_reqs = imp_trust_pkgs imp_info -- [PackageId]
+ pkg_reqs = imp_trust_pkgs imp_info -- [PackageKey]
condense :: (Module, [ImportedModsVal]) -> Hsc (Module, SrcSpan, IsSafeImport)
condense (_, []) = panic "HscMain.condense: Pattern match failure!"
condense (m, x:xs) = do (_,_,l,s) <- foldlM cond' x xs
-- we turn all imports into safe ones when
-- inference mode is on.
- let s' = if safeInferOn dflags then True else s
+ let s' = if safeInferOn dflags &&
+ safeHaskell dflags == Sf_None
+ then True else s
return (m, l, s')
-- ImportedModsVal = (ModuleName, Bool, SrcSpan, IsSafeImport)
@@ -876,7 +881,7 @@ hscCheckSafe hsc_env m l = runHsc hsc_env $ do
return $ isEmptyBag errs
-- | Return if a module is trusted and the pkgs it depends on to be trusted.
-hscGetSafe :: HscEnv -> Module -> SrcSpan -> IO (Bool, [PackageId])
+hscGetSafe :: HscEnv -> Module -> SrcSpan -> IO (Bool, [PackageKey])
hscGetSafe hsc_env m l = runHsc hsc_env $ do
dflags <- getDynFlags
(self, pkgs) <- hscCheckSafe' dflags m l
@@ -890,15 +895,15 @@ hscGetSafe hsc_env m l = runHsc hsc_env $ do
-- Return (regardless of trusted or not) if the trust type requires the modules
-- own package be trusted and a list of other packages required to be trusted
-- (these later ones haven't been checked) but the own package trust has been.
-hscCheckSafe' :: DynFlags -> Module -> SrcSpan -> Hsc (Maybe PackageId, [PackageId])
+hscCheckSafe' :: DynFlags -> Module -> SrcSpan -> Hsc (Maybe PackageKey, [PackageKey])
hscCheckSafe' dflags m l = do
(tw, pkgs) <- isModSafe m l
case tw of
False -> return (Nothing, pkgs)
True | isHomePkg m -> return (Nothing, pkgs)
- | otherwise -> return (Just $ modulePackageId m, pkgs)
+ | otherwise -> return (Just $ modulePackageKey m, pkgs)
where
- isModSafe :: Module -> SrcSpan -> Hsc (Bool, [PackageId])
+ isModSafe :: Module -> SrcSpan -> Hsc (Bool, [PackageKey])
isModSafe m l = do
iface <- lookup' m
case iface of
@@ -912,7 +917,7 @@ hscCheckSafe' dflags m l = do
let trust = getSafeMode $ mi_trust iface'
trust_own_pkg = mi_trust_pkg iface'
-- check module is trusted
- safeM = trust `elem` [Sf_SafeInferred, Sf_Safe, Sf_Trustworthy]
+ safeM = trust `elem` [Sf_Safe, Sf_Trustworthy]
-- check package is trusted
safeP = packageTrusted trust trust_own_pkg m
-- pkg trust reqs
@@ -927,13 +932,13 @@ hscCheckSafe' dflags m l = do
return (trust == Sf_Trustworthy, pkgRs)
where
- pkgTrustErr = unitBag $ mkPlainErrMsg dflags l $
+ pkgTrustErr = unitBag $ mkErrMsg dflags l (pkgQual dflags) $
sep [ ppr (moduleName m)
<> text ": Can't be safely imported!"
- , text "The package (" <> ppr (modulePackageId m)
+ , text "The package (" <> ppr (modulePackageKey m)
<> text ") the module resides in isn't trusted."
]
- modTrustErr = unitBag $ mkPlainErrMsg dflags l $
+ modTrustErr = unitBag $ mkErrMsg dflags l (pkgQual dflags) $
sep [ ppr (moduleName m)
<> text ": Can't be safely imported!"
, text "The module itself isn't safe." ]
@@ -948,11 +953,9 @@ hscCheckSafe' dflags m l = do
packageTrusted _ _ _
| not (packageTrustOn dflags) = True
packageTrusted Sf_Safe False _ = True
- packageTrusted Sf_SafeInferred False _ = True
packageTrusted _ _ m
| isHomePkg m = True
- | otherwise = trusted $ getPackageDetails (pkgState dflags)
- (modulePackageId m)
+ | otherwise = trusted $ getPackageDetails dflags (modulePackageKey m)
lookup' :: Module -> Hsc (Maybe ModIface)
lookup' m = do
@@ -976,11 +979,11 @@ hscCheckSafe' dflags m l = do
isHomePkg :: Module -> Bool
isHomePkg m
- | thisPackage dflags == modulePackageId m = True
+ | thisPackage dflags == modulePackageKey m = True
| otherwise = False
-- | Check the list of packages are trusted.
-checkPkgTrust :: DynFlags -> [PackageId] -> Hsc ()
+checkPkgTrust :: DynFlags -> [PackageKey] -> Hsc ()
checkPkgTrust dflags pkgs =
case errors of
[] -> return ()
@@ -988,19 +991,20 @@ checkPkgTrust dflags pkgs =
where
errors = catMaybes $ map go pkgs
go pkg
- | trusted $ getPackageDetails (pkgState dflags) pkg
+ | trusted $ getPackageDetails dflags pkg
= Nothing
| otherwise
- = Just $ mkPlainErrMsg dflags noSrcSpan
+ = Just $ mkErrMsg dflags noSrcSpan (pkgQual dflags)
$ text "The package (" <> ppr pkg <> text ") is required" <>
text " to be trusted but it isn't!"
--- | Set module to unsafe and wipe trust information.
+-- | Set module to unsafe and (potentially) wipe trust information.
--
-- Make sure to call this method to set a module to inferred unsafe,
--- it should be a central and single failure method.
-wipeTrust :: TcGblEnv -> WarningMessages -> Hsc TcGblEnv
-wipeTrust tcg_env whyUnsafe = do
+-- it should be a central and single failure method. We only wipe the trust
+-- information when we aren't in a specific Safe Haskell mode.
+markUnsafe :: TcGblEnv -> WarningMessages -> Hsc TcGblEnv
+markUnsafe tcg_env whyUnsafe = do
dflags <- getDynFlags
when (wopt Opt_WarnUnsafe dflags)
@@ -1008,7 +1012,12 @@ wipeTrust tcg_env whyUnsafe = do
mkPlainWarnMsg dflags (warnUnsafeOnLoc dflags) (whyUnsafe' dflags))
liftIO $ writeIORef (tcg_safeInfer tcg_env) False
- return $ tcg_env { tcg_imports = wiped_trust }
+ -- NOTE: Only wipe trust when not in an explicity safe haskell mode. Other
+ -- times inference may be on but we are in Trustworthy mode -- so we want
+ -- to record safe-inference failed but not wipe the trust dependencies.
+ case safeHaskell dflags == Sf_None of
+ True -> return $ tcg_env { tcg_imports = wiped_trust }
+ False -> return tcg_env
where
wiped_trust = (tcg_imports tcg_env) { imp_trust_pkgs = [] }
@@ -1018,7 +1027,7 @@ wipeTrust tcg_env whyUnsafe = do
, nest 4 $ (vcat $ badFlags df) $+$
(vcat $ pprErrMsgBagWithLoc whyUnsafe)
]
- badFlags df = concat $ map (badFlag df) unsafeFlags
+ badFlags df = concat $ map (badFlag df) unsafeFlagsForInfer
badFlag df (str,loc,on,_)
| on df = [mkLocMessage SevOutput (loc df) $
text str <+> text "is not allowed in Safe Haskell"]
@@ -1070,18 +1079,16 @@ hscSimpleIface' tc_result mb_old_iface = do
return (new_iface, no_change, details)
hscNormalIface :: HscEnv
- -> FilePath
-> ModGuts
-> Maybe Fingerprint
-> IO (ModIface, Bool, ModDetails, CgGuts)
-hscNormalIface hsc_env extCore_filename simpl_result mb_old_iface =
- runHsc hsc_env $ hscNormalIface' extCore_filename simpl_result mb_old_iface
+hscNormalIface hsc_env simpl_result mb_old_iface =
+ runHsc hsc_env $ hscNormalIface' simpl_result mb_old_iface
-hscNormalIface' :: FilePath
- -> ModGuts
+hscNormalIface' :: ModGuts
-> Maybe Fingerprint
-> Hsc (ModIface, Bool, ModDetails, CgGuts)
-hscNormalIface' extCore_filename simpl_result mb_old_iface = do
+hscNormalIface' simpl_result mb_old_iface = do
hsc_env <- getHscEnv
(cg_guts, details) <- {-# SCC "CoreTidy" #-}
liftIO $ tidyProgram hsc_env simpl_result
@@ -1096,11 +1103,6 @@ hscNormalIface' extCore_filename simpl_result mb_old_iface = do
ioMsgMaybe $
mkIface hsc_env mb_old_iface details simpl_result
- -- Emit external core
- -- This should definitely be here and not after CorePrep,
- -- because CorePrep produces unqualified constructor wrapper declarations,
- -- so its output isn't valid External Core (without some preprocessing).
- liftIO $ emitExternalCore (hsc_dflags hsc_env) extCore_filename cg_guts
liftIO $ dumpIfaceStats hsc_env
-- Return the prepared code.
@@ -1147,7 +1149,7 @@ hscGenHardCode hsc_env cgguts mod_summary output_filename = do
-- PREPARE FOR CODE GENERATION
-- Do saturation and convert to A-normal form
prepd_binds <- {-# SCC "CorePrep" #-}
- corePrepPgm dflags hsc_env core_binds data_tycons ;
+ corePrepPgm hsc_env location core_binds data_tycons ;
----------------- Convert to STG ------------------
(stg_binds, cost_centre_info)
<- {-# SCC "CoreToStg" #-}
@@ -1158,8 +1160,15 @@ hscGenHardCode hsc_env cgguts mod_summary output_filename = do
------------------ Code generation ------------------
- cmms <- {-# SCC "NewCodeGen" #-}
- tryNewCodeGen hsc_env this_mod data_tycons
+ -- The back-end is streamed: each top-level function goes
+ -- from Stg all the way to asm before dealing with the next
+ -- top-level function, so showPass isn't very useful here.
+ -- Hence we have one showPass for the whole backend, the
+ -- next showPass after this will be "Assembler".
+ showPass dflags "CodeGen"
+
+ cmms <- {-# SCC "StgCmm" #-}
+ doCodeGen hsc_env this_mod data_tycons
cost_centre_info
stg_binds hpc_info
@@ -1203,7 +1212,7 @@ hscInteractive hsc_env cgguts mod_summary = do
-- PREPARE FOR CODE GENERATION
-- Do saturation and convert to A-normal form
prepd_binds <- {-# SCC "CorePrep" #-}
- corePrepPgm dflags hsc_env core_binds data_tycons
+ corePrepPgm hsc_env location core_binds data_tycons
----------------- Generate byte code ------------------
comp_bc <- byteCodeGen dflags this_mod prepd_binds data_tycons mod_breaks
------------------ Create f-x-dynamic C-side stuff ---
@@ -1236,15 +1245,15 @@ hscCompileCmmFile hsc_env filename output_filename = runHsc hsc_env $ do
-------------------- Stuff for new code gen ---------------------
-tryNewCodeGen :: HscEnv -> Module -> [TyCon]
- -> CollectedCCs
- -> [StgBinding]
- -> HpcInfo
- -> IO (Stream IO CmmGroup ())
+doCodeGen :: HscEnv -> Module -> [TyCon]
+ -> CollectedCCs
+ -> [StgBinding]
+ -> HpcInfo
+ -> IO (Stream IO CmmGroup ())
-- Note we produce a 'Stream' of CmmGroups, so that the
-- backend can be run incrementally. Otherwise it generates all
-- the C-- up front, which has a significant space cost.
-tryNewCodeGen hsc_env this_mod data_tycons
+doCodeGen hsc_env this_mod data_tycons
cost_centre_info stg_binds hpc_info = do
let dflags = hsc_dflags hsc_env
@@ -1357,11 +1366,7 @@ hscStmtWithLocation hsc_env0 stmt source linenumber =
Just parsed_stmt -> do
-- Rename and typecheck it
hsc_env <- getHscEnv
- let interactive_hsc_env = setInteractivePackage hsc_env
- -- Bindings created here belong to the interactive package
- -- See Note [The interactive package] in HscTypes
- -- (NB: maybe not necessary, since Stmts bind only Ids)
- (ids, tc_expr, fix_env) <- ioMsgMaybe $ tcRnStmt interactive_hsc_env parsed_stmt
+ (ids, tc_expr, fix_env) <- ioMsgMaybe $ tcRnStmt hsc_env parsed_stmt
-- Desugar it
ds_expr <- ioMsgMaybe $ deSugarExpr hsc_env tc_expr
@@ -1369,7 +1374,7 @@ hscStmtWithLocation hsc_env0 stmt source linenumber =
handleWarnings
-- Then code-gen, and link it
- -- It's important NOT to have package 'interactive' as thisPackageId
+ -- It's important NOT to have package 'interactive' as thisPackageKey
-- for linking, else we try to link 'main' and can't find it.
-- Whereas the linker already knows to ignore 'interactive'
let src_span = srcLocSpan interactiveSrcLoc
@@ -1397,10 +1402,7 @@ hscDeclsWithLocation hsc_env0 str source linenumber =
{- Rename and typecheck it -}
hsc_env <- getHscEnv
- let interactive_hsc_env = setInteractivePackage hsc_env
- -- Bindings created here belong to the interactive package
- -- See Note [The interactive package] in HscTypes
- tc_gblenv <- ioMsgMaybe $ tcRnDeclsi interactive_hsc_env decls
+ tc_gblenv <- ioMsgMaybe $ tcRnDeclsi hsc_env decls
{- Grab the new instances -}
-- We grab the whole environment because of the overlapping that may have
@@ -1434,7 +1436,7 @@ hscDeclsWithLocation hsc_env0 str source linenumber =
{- Prepare For Code Generation -}
-- Do saturation and convert to A-normal form
prepd_binds <- {-# SCC "CorePrep" #-}
- liftIO $ corePrepPgm dflags hsc_env core_binds data_tycons
+ liftIO $ corePrepPgm hsc_env iNTERACTIVELoc core_binds data_tycons
{- Generate byte code -}
cbc <- liftIO $ byteCodeGen dflags this_mod
@@ -1540,11 +1542,11 @@ hscParseThingWithLocation source linenumber parser str
return thing
hscCompileCore :: HscEnv -> Bool -> SafeHaskellMode -> ModSummary
- -> CoreProgram -> FilePath -> FilePath -> IO ()
-hscCompileCore hsc_env simplify safe_mode mod_summary binds output_filename extCore_filename
+ -> CoreProgram -> FilePath -> IO ()
+hscCompileCore hsc_env simplify safe_mode mod_summary binds output_filename
= runHsc hsc_env $ do
guts <- maybe_simplify (mkModGuts (ms_mod mod_summary) safe_mode binds)
- (iface, changed, _details, cgguts) <- hscNormalIface' extCore_filename guts Nothing
+ (iface, changed, _details, cgguts) <- hscNormalIface' guts Nothing
liftIO $ hscWriteIface (hsc_dflags hsc_env) iface changed mod_summary
_ <- liftIO $ hscGenHardCode hsc_env cgguts mod_summary output_filename
return ()
diff --git a/compiler/main/HscStats.hs b/compiler/main/HscStats.hs
index 715ee8130c91..4f901b1849c3 100644
--- a/compiler/main/HscStats.hs
+++ b/compiler/main/HscStats.hs
@@ -132,7 +132,7 @@ ppSourceStats short (L _ (HsModule _ exports imports ldecls _ _))
class_info decl@(ClassDecl {})
= (classops, addpr (sum3 (map count_bind methods)))
where
- methods = map (unLoc . snd) $ bagToList (tcdMeths decl)
+ methods = map unLoc $ bagToList (tcdMeths decl)
(_, classops, _, _, _) = count_sigs (map unLoc (tcdSigs decl))
class_info _ = (0,0)
@@ -147,7 +147,7 @@ ppSourceStats short (L _ (HsModule _ exports imports ldecls _ _))
(addpr (sum3 (map count_bind methods)),
ss, is, length ats, length adts)
where
- methods = map (unLoc . snd) $ bagToList inst_meths
+ methods = map unLoc $ bagToList inst_meths
-- TODO: use Sum monoid
addpr :: (Int,Int,Int) -> Int
diff --git a/compiler/main/HscTypes.lhs b/compiler/main/HscTypes.lhs
index b8ecc109d068..123b0777fcec 100644
--- a/compiler/main/HscTypes.lhs
+++ b/compiler/main/HscTypes.lhs
@@ -4,6 +4,7 @@
\section[HscTypes]{Types for the per-module compiler}
\begin{code}
+{-# LANGUAGE CPP, DeriveDataTypeable, ScopedTypeVariables #-}
-- | Types for the per-module compiler
module HscTypes (
@@ -53,6 +54,7 @@ module HscTypes (
setInteractivePrintName, icInteractiveModule,
InteractiveImport(..), setInteractivePackage,
mkPrintUnqualified, pprModulePrefix,
+ mkQualPackage, mkQualModule, pkgQual,
-- * Interfaces
ModIface(..), mkIfaceWarnCache, mkIfaceHashCache, mkIfaceFixCache,
@@ -71,7 +73,7 @@ module HscTypes (
TypeEnv, lookupType, lookupTypeHscEnv, mkTypeEnv, emptyTypeEnv,
typeEnvFromEntities, mkTypeEnvWithImplicits,
extendTypeEnv, extendTypeEnvList,
- extendTypeEnvWithIds, extendTypeEnvWithPatSyns,
+ extendTypeEnvWithIds,
lookupTypeEnv,
typeEnvElts, typeEnvTyCons, typeEnvIds, typeEnvPatSyns,
typeEnvDataCons, typeEnvCoAxioms, typeEnvClasses,
@@ -442,7 +444,7 @@ instance Outputable TargetId where
-- | Helps us find information about modules in the home package
type HomePackageTable = ModuleNameEnv HomeModInfo
-- Domain = modules in the home package that have been fully compiled
- -- "home" package name cached here for convenience
+ -- "home" package key cached here for convenience
-- | Helps us find information about modules in the imported packages
type PackageIfaceTable = ModuleEnv ModIface
@@ -633,26 +635,26 @@ type FinderCache = ModuleNameEnv FindResult
data FindResult
= Found ModLocation Module
-- ^ The module was found
- | NoPackage PackageId
+ | NoPackage PackageKey
-- ^ The requested package was not found
- | FoundMultiple [PackageId]
+ | FoundMultiple [(Module, ModuleOrigin)]
-- ^ _Error_: both in multiple packages
-- | Not found
| NotFound
{ fr_paths :: [FilePath] -- Places where I looked
- , fr_pkg :: Maybe PackageId -- Just p => module is in this package's
+ , fr_pkg :: Maybe PackageKey -- Just p => module is in this package's
-- manifest, but couldn't find
-- the .hi file
- , fr_mods_hidden :: [PackageId] -- Module is in these packages,
+ , fr_mods_hidden :: [PackageKey] -- Module is in these packages,
-- but the *module* is hidden
- , fr_pkgs_hidden :: [PackageId] -- Module is in these packages,
+ , fr_pkgs_hidden :: [PackageKey] -- Module is in these packages,
-- but the *package* is hidden
- , fr_suggestions :: [Module] -- Possible mis-spelled modules
+ , fr_suggestions :: [ModuleSuggestion] -- Possible mis-spelled modules
}
-- | Cache that remembers where we found a particular module. Contains both
@@ -951,7 +953,8 @@ data ModDetails
-- The next two fields are created by the typechecker
md_exports :: [AvailInfo],
md_types :: !TypeEnv, -- ^ Local type environment for this particular module
- md_insts :: ![ClsInst], -- ^ 'DFunId's for the instances in this module
+ -- Includes Ids, TyCons, PatSyns
+ md_insts :: ![ClsInst], -- ^ 'DFunId's for the instances in this module
md_fam_insts :: ![FamInst],
md_rules :: ![CoreRule], -- ^ Domain may include 'Id's from other modules
md_anns :: ![Annotation], -- ^ Annotations present in this module: currently
@@ -993,8 +996,8 @@ data ModGuts
mg_rdr_env :: !GlobalRdrEnv, -- ^ Top-level lexical environment
-- These fields all describe the things **declared in this module**
- mg_fix_env :: !FixityEnv, -- ^ Fixities declared in this module
- -- ToDo: I'm unconvinced this is actually used anywhere
+ mg_fix_env :: !FixityEnv, -- ^ Fixities declared in this module.
+ -- Used for creating interface files.
mg_tcs :: ![TyCon], -- ^ TyCons declared in this module
-- (includes TyCons for classes)
mg_insts :: ![ClsInst], -- ^ Class instances declared in this module
@@ -1065,7 +1068,7 @@ data CgGuts
-- as part of the code-gen of tycons
cg_foreign :: !ForeignStubs, -- ^ Foreign export stubs
- cg_dep_pkgs :: ![PackageId], -- ^ Dependent packages, used to
+ cg_dep_pkgs :: ![PackageKey], -- ^ Dependent packages, used to
-- generate #includes for C code gen
cg_hpc_info :: !HpcInfo, -- ^ Program coverage tick box information
cg_modBreaks :: !ModBreaks -- ^ Module breakpoints
@@ -1098,13 +1101,13 @@ appendStubC (ForeignStubs h c) c_code = ForeignStubs h (c $$ c_code)
Note [The interactive package]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Type and class declarations at the command prompt are treated as if
-they were defined in modules
+Type, class, and value declarations at the command prompt are treated
+as if they were defined in modules
interactive:Ghci1
interactive:Ghci2
...etc...
with each bunch of declarations using a new module, all sharing a
-common package 'interactive' (see Module.interactivePackageId, and
+common package 'interactive' (see Module.interactivePackageKey, and
PrelNames.mkInteractiveModule).
This scheme deals well with shadowing. For example:
@@ -1119,10 +1122,10 @@ shadowed by the second declaration. But it has a respectable
qualified name (Ghci1.T), and its source location says where it was
defined.
-So the main invariant continues to hold, that in any session an original
-name M.T only refers to oe unique thing. (In a previous iteration both
-the T's above were called :Interactive.T, albeit with different uniques,
-which gave rise to all sorts of trouble.)
+So the main invariant continues to hold, that in any session an
+original name M.T only refers to one unique thing. (In a previous
+iteration both the T's above were called :Interactive.T, albeit with
+different uniques, which gave rise to all sorts of trouble.)
The details are a bit tricky though:
@@ -1132,25 +1135,29 @@ The details are a bit tricky though:
* ic_tythings contains only things from the 'interactive' package.
* Module from the 'interactive' package (Ghci1, Ghci2 etc) never go
- in the Home Package Table (HPT). When you say :load, that's when
+ in the Home Package Table (HPT). When you say :load, that's when we
extend the HPT.
* The 'thisPackage' field of DynFlags is *not* set to 'interactive'.
- It stays as 'main' (or whatever -package-name says), and is the
+ It stays as 'main' (or whatever -this-package-key says), and is the
package to which :load'ed modules are added to.
* So how do we arrange that declarations at the command prompt get
- to be in the 'interactive' package? By setting 'thisPackage' just
- before the typecheck/rename step for command-line processing;
- see the calls to HscTypes.setInteractivePackage in
- HscMain.hscDeclsWithLocation and hscStmtWithLocation.
+ to be in the 'interactive' package? Simply by setting the tcg_mod
+ field of the TcGblEnv to "interactive:Ghci1". This is done by the
+ call to initTc in initTcInteractive, initTcForLookup, which in
+ turn get the module from its 'icInteractiveModule' field of the
+ interactive context.
+
+ The 'thisPackage' field stays as 'main' (or whatever -this-package-key says).
* The main trickiness is that the type environment (tcg_type_env and
- fixity envt (tcg_fix_env) now contains entities from all the
- GhciN modules together, rather than just a single module as is usually
- the case. So you can't use "nameIsLocalOrFrom" to decide whether
- to look in the TcGblEnv vs the HPT/PTE. This is a change, but not
- a problem provided you know.
+ fixity envt (tcg_fix_env), and instances (tcg_insts, tcg_fam_insts)
+ now contains entities from all the interactive-package modules
+ (Ghci1, Ghci2, ...) together, rather than just a single module as
+ is usually the case. So you can't use "nameIsLocalOrFrom" to
+ decide whether to look in the TcGblEnv vs the HPT/PTE. This is a
+ change, but not a problem provided you know.
Note [Interactively-bound Ids in GHCi]
@@ -1336,7 +1343,7 @@ extendInteractiveContext ictxt new_tythings
setInteractivePackage :: HscEnv -> HscEnv
-- Set the 'thisPackage' DynFlag to 'interactive'
setInteractivePackage hsc_env
- = hsc_env { hsc_dflags = (hsc_dflags hsc_env) { thisPackage = interactivePackageId } }
+ = hsc_env { hsc_dflags = (hsc_dflags hsc_env) { thisPackage = interactivePackageKey } }
setInteractivePrintName :: InteractiveContext -> Name -> InteractiveContext
setInteractivePrintName ic n = ic{ic_int_print = n}
@@ -1403,11 +1410,28 @@ exposed (say P2), so we use M.T for that, and P1:M.T for the other one.
This is handled by the qual_mod component of PrintUnqualified, inside
the (ppr mod) of case (3), in Name.pprModulePrefix
+Note [Printing package keys]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+In the old days, original names were tied to PackageIds, which directly
+corresponded to the entities that users wrote in Cabal files, and were perfectly
+suitable for printing when we need to disambiguate packages. However, with
+PackageKey, the situation is different. First, the key is not human readable
+at all, so we need to consult the package database to find the appropriate
+PackageId to display. Second, there may be multiple copies of a library visible
+with the same PackageId, in which case we need to disambiguate. For now,
+we just emit the actual package key (which the user can go look up); however,
+another scheme is to (recursively) say which dependencies are different.
+
+NB: When we extend package keys to also have holes, we will have to disambiguate
+those as well.
+
\begin{code}
-- | Creates some functions that work out the best ways to format
--- names for the user according to a set of heuristics
+-- names for the user according to a set of heuristics.
mkPrintUnqualified :: DynFlags -> GlobalRdrEnv -> PrintUnqualified
-mkPrintUnqualified dflags env = (qual_name, qual_mod)
+mkPrintUnqualified dflags env = QueryQualify qual_name
+ (mkQualModule dflags)
+ (mkQualPackage dflags)
where
qual_name mod occ
| [gre] <- unqual_gres
@@ -1440,18 +1464,48 @@ mkPrintUnqualified dflags env = (qual_name, qual_mod)
-- "import M" would resolve unambiguously to P:M. (if P is the
-- current package we can just assume it is unqualified).
- qual_mod mod
- | modulePackageId mod == thisPackage dflags = False
+-- | Creates a function for formatting modules based on two heuristics:
+-- (1) if the module is the current module, don't qualify, and (2) if there
+-- is only one exposed package which exports this module, don't qualify.
+mkQualModule :: DynFlags -> QueryQualifyModule
+mkQualModule dflags mod
+ | modulePackageKey mod == thisPackage dflags = False
- | [pkgconfig] <- [pkg | (pkg,exposed_module) <- lookup,
- exposed pkg && exposed_module],
- packageConfigId pkgconfig == modulePackageId mod
+ | [(_, pkgconfig)] <- lookup,
+ packageConfigId pkgconfig == modulePackageKey mod
-- this says: we are given a module P:M, is there just one exposed package
-- that exposes a module M, and is it package P?
= False
| otherwise = True
where lookup = lookupModuleInAllPackages dflags (moduleName mod)
+
+-- | Creates a function for formatting packages based on two heuristics:
+-- (1) don't qualify if the package in question is "main", and (2) only qualify
+-- with a package key if the package ID would be ambiguous.
+mkQualPackage :: DynFlags -> QueryQualifyPackage
+mkQualPackage dflags pkg_key
+ | pkg_key == mainPackageKey
+ -- Skip the lookup if it's main, since it won't be in the package
+ -- database!
+ = False
+ | searchPackageId dflags pkgid `lengthIs` 1
+ -- this says: we are given a package pkg-0.1@MMM, is there only one
+ -- exposed package whose package ID is pkg-0.1?
+ = False
+ | otherwise
+ = True
+ where pkg = fromMaybe (pprPanic "qual_pkg" (ftext (packageKeyFS pkg_key)))
+ (lookupPackage dflags pkg_key)
+ pkgid = sourcePackageId pkg
+
+-- | A function which only qualifies package names if necessary; but
+-- qualifies all other identifiers.
+pkgQual :: DynFlags -> PrintUnqualified
+pkgQual dflags = alwaysQualify {
+ queryQualifyPackage = mkQualPackage dflags
+ }
+
\end{code}
@@ -1480,7 +1534,7 @@ Examples:
IfaceClass decl happens to use IfaceDecl recursively for the
associated types, but that's irrelevant here.)
- * Dictionary function Ids are not implict.
+ * Dictionary function Ids are not implicit.
* Axioms for newtypes are implicit (same as above), but axioms
for data/type family instances are *not* implicit (like DFunIds).
@@ -1501,15 +1555,17 @@ implicitTyThings :: TyThing -> [TyThing]
implicitTyThings (AnId _) = []
implicitTyThings (ACoAxiom _cc) = []
implicitTyThings (ATyCon tc) = implicitTyConThings tc
-implicitTyThings (AConLike cl) = case cl of
- RealDataCon dc ->
- -- For data cons add the worker and (possibly) wrapper
- map AnId (dataConImplicitIds dc)
- PatSynCon ps ->
- -- For bidirectional pattern synonyms, add the wrapper
- case patSynWrapper ps of
- Nothing -> []
- Just id -> [AnId id]
+implicitTyThings (AConLike cl) = implicitConLikeThings cl
+
+implicitConLikeThings :: ConLike -> [TyThing]
+implicitConLikeThings (RealDataCon dc)
+ = map AnId (dataConImplicitIds dc)
+ -- For data cons add the worker and (possibly) wrapper
+
+implicitConLikeThings (PatSynCon {})
+ = [] -- Pattern synonyms have no implicit Ids; the wrapper and matcher
+ -- are not "implicit"; they are simply new top-level bindings,
+ -- and they have their own declaration in an interface file
implicitClassThings :: Class -> [TyThing]
implicitClassThings cl
@@ -1558,8 +1614,8 @@ implicitCoTyCon tc
-- other declaration.
isImplicitTyThing :: TyThing -> Bool
isImplicitTyThing (AConLike cl) = case cl of
- RealDataCon{} -> True
- PatSynCon ps -> isImplicitId (patSynId ps)
+ RealDataCon {} -> True
+ PatSynCon {} -> False
isImplicitTyThing (AnId id) = isImplicitId id
isImplicitTyThing (ATyCon tc) = isImplicitTyCon tc
isImplicitTyThing (ACoAxiom ax) = isImplicitCoAxiom ax
@@ -1675,17 +1731,6 @@ extendTypeEnvList env things = foldl extendTypeEnv env things
extendTypeEnvWithIds :: TypeEnv -> [Id] -> TypeEnv
extendTypeEnvWithIds env ids
= extendNameEnvList env [(getName id, AnId id) | id <- ids]
-
-extendTypeEnvWithPatSyns :: TypeEnv -> [PatSyn] -> TypeEnv
-extendTypeEnvWithPatSyns env patsyns
- = extendNameEnvList env $ concatMap pat_syn_things patsyns
- where
- pat_syn_things :: PatSyn -> [(Name, TyThing)]
- pat_syn_things ps = (getName ps, AConLike (PatSynCon ps)):
- case patSynWrapper ps of
- Just wrap_id -> [(getName wrap_id, AnId wrap_id)]
- Nothing -> []
-
\end{code}
\begin{code}
@@ -1908,7 +1953,7 @@ data Dependencies
-- I.e. modules that this one imports, or that are in the
-- dep_mods of those directly-imported modules
- , dep_pkgs :: [(PackageId, Bool)]
+ , dep_pkgs :: [(PackageKey, Bool)]
-- ^ All packages transitively below this module
-- I.e. packages to which this module's direct imports belong,
-- or that are in the dep_pkgs of those modules
@@ -2204,37 +2249,50 @@ type ModuleGraph = [ModSummary]
emptyMG :: ModuleGraph
emptyMG = []
--- | A single node in a 'ModuleGraph. The nodes of the module graph are one of:
+-- | A single node in a 'ModuleGraph'. The nodes of the module graph
+-- are one of:
--
-- * A regular Haskell source module
---
-- * A hi-boot source module
---
-- * An external-core source module
+--
data ModSummary
= ModSummary {
- ms_mod :: Module, -- ^ Identity of the module
- ms_hsc_src :: HscSource, -- ^ The module source either plain Haskell, hs-boot or external core
- ms_location :: ModLocation, -- ^ Location of the various files belonging to the module
- ms_hs_date :: UTCTime, -- ^ Timestamp of source file
- ms_obj_date :: Maybe UTCTime, -- ^ Timestamp of object, if we have one
- ms_srcimps :: [Located (ImportDecl RdrName)], -- ^ Source imports of the module
- ms_textual_imps :: [Located (ImportDecl RdrName)], -- ^ Non-source imports of the module from the module *text*
- ms_hspp_file :: FilePath, -- ^ Filename of preprocessed source file
- ms_hspp_opts :: DynFlags, -- ^ Cached flags from @OPTIONS@, @INCLUDE@
- -- and @LANGUAGE@ pragmas in the modules source code
- ms_hspp_buf :: Maybe StringBuffer -- ^ The actual preprocessed source, if we have it
+ ms_mod :: Module,
+ -- ^ Identity of the module
+ ms_hsc_src :: HscSource,
+ -- ^ The module source either plain Haskell, hs-boot or external core
+ ms_location :: ModLocation,
+ -- ^ Location of the various files belonging to the module
+ ms_hs_date :: UTCTime,
+ -- ^ Timestamp of source file
+ ms_obj_date :: Maybe UTCTime,
+ -- ^ Timestamp of object, if we have one
+ ms_srcimps :: [Located (ImportDecl RdrName)],
+ -- ^ Source imports of the module
+ ms_textual_imps :: [Located (ImportDecl RdrName)],
+ -- ^ Non-source imports of the module from the module *text*
+ ms_hspp_file :: FilePath,
+ -- ^ Filename of preprocessed source file
+ ms_hspp_opts :: DynFlags,
+ -- ^ Cached flags from @OPTIONS@, @INCLUDE@ and @LANGUAGE@
+ -- pragmas in the modules source code
+ ms_hspp_buf :: Maybe StringBuffer
+ -- ^ The actual preprocessed source, if we have it
}
ms_mod_name :: ModSummary -> ModuleName
ms_mod_name = moduleName . ms_mod
ms_imps :: ModSummary -> [Located (ImportDecl RdrName)]
-ms_imps ms = ms_textual_imps ms ++ map mk_additional_import (dynFlagDependencies (ms_hspp_opts ms))
+ms_imps ms =
+ ms_textual_imps ms ++
+ map mk_additional_import (dynFlagDependencies (ms_hspp_opts ms))
where
- -- This is a not-entirely-satisfactory means of creating an import that corresponds to an
- -- import that did not occur in the program text, such as those induced by the use of
- -- plugins (the -plgFoo flag)
+ -- This is a not-entirely-satisfactory means of creating an import
+ -- that corresponds to an import that did not occur in the program
+ -- text, such as those induced by the use of plugins (the -plgFoo
+ -- flag)
mk_additional_import mod_nm = noLoc $ ImportDecl {
ideclName = noLoc mod_nm,
ideclPkgQual = Nothing,
@@ -2484,14 +2542,15 @@ trustInfoToNum it
Sf_Unsafe -> 1
Sf_Trustworthy -> 2
Sf_Safe -> 3
- Sf_SafeInferred -> 4
numToTrustInfo :: Word8 -> IfaceTrustInfo
numToTrustInfo 0 = setSafeMode Sf_None
numToTrustInfo 1 = setSafeMode Sf_Unsafe
numToTrustInfo 2 = setSafeMode Sf_Trustworthy
numToTrustInfo 3 = setSafeMode Sf_Safe
-numToTrustInfo 4 = setSafeMode Sf_SafeInferred
+numToTrustInfo 4 = setSafeMode Sf_Safe -- retained for backwards compat, used
+ -- to be Sf_SafeInferred but we no longer
+ -- differentiate.
numToTrustInfo n = error $ "numToTrustInfo: bad input number! (" ++ show n ++ ")"
instance Outputable IfaceTrustInfo where
@@ -2499,7 +2558,6 @@ instance Outputable IfaceTrustInfo where
ppr (TrustInfo Sf_Unsafe) = ptext $ sLit "unsafe"
ppr (TrustInfo Sf_Trustworthy) = ptext $ sLit "trustworthy"
ppr (TrustInfo Sf_Safe) = ptext $ sLit "safe"
- ppr (TrustInfo Sf_SafeInferred) = ptext $ sLit "safe-inferred"
instance Binary IfaceTrustInfo where
put_ bh iftrust = putByte bh $ trustInfoToNum iftrust
diff --git a/compiler/main/InteractiveEval.hs b/compiler/main/InteractiveEval.hs
index ede519982a90..d60cf56eba23 100644
--- a/compiler/main/InteractiveEval.hs
+++ b/compiler/main/InteractiveEval.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, MagicHash, NondecreasingIndentation, UnboxedTuples #-}
+
-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow, 2005-2007
@@ -877,7 +879,7 @@ getContext = withSession $ \HscEnv{ hsc_IC=ic } ->
-- its full top-level scope available.
moduleIsInterpreted :: GhcMonad m => Module -> m Bool
moduleIsInterpreted modl = withSession $ \h ->
- if modulePackageId modl /= thisPackage (hsc_dflags h)
+ if modulePackageKey modl /= thisPackage (hsc_dflags h)
then return False
else case lookupUFM (hsc_HPT h) (moduleName modl) of
Just details -> return (isJust (mi_globals (hm_iface details)))
diff --git a/compiler/main/InteractiveEvalTypes.hs b/compiler/main/InteractiveEvalTypes.hs
index e3324a39a15b..6ea1a2564866 100644
--- a/compiler/main/InteractiveEvalTypes.hs
+++ b/compiler/main/InteractiveEvalTypes.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow, 2005-2007
diff --git a/compiler/main/PackageConfig.hs b/compiler/main/PackageConfig.hs
index d34d9e1f5c5e..864980be9da5 100644
--- a/compiler/main/PackageConfig.hs
+++ b/compiler/main/PackageConfig.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-- |
-- Package configuration information: essentially the interface to Cabal, with
-- some utilities
@@ -7,8 +9,8 @@
module PackageConfig (
-- $package_naming
- -- * PackageId
- mkPackageId, packageConfigId,
+ -- * PackageKey
+ mkPackageKey, packageConfigId,
-- * The PackageConfig type: information about a package
PackageConfig,
@@ -24,7 +26,8 @@ module PackageConfig (
import Distribution.InstalledPackageInfo
import Distribution.ModuleName
-import Distribution.Package hiding (PackageId)
+import Distribution.Package hiding (PackageKey, mkPackageKey)
+import qualified Distribution.Package as Cabal
import Distribution.Text
import Distribution.Version
@@ -41,36 +44,33 @@ defaultPackageConfig :: PackageConfig
defaultPackageConfig = emptyInstalledPackageInfo
-- -----------------------------------------------------------------------------
--- PackageId (package names with versions)
+-- PackageKey (package names, versions and dep hash)
-- $package_naming
-- #package_naming#
--- Mostly the compiler deals in terms of 'PackageName's, which don't
--- have the version suffix. This is so that we don't need to know the
--- version for the @-package-name@ flag, or know the versions of
--- wired-in packages like @base@ & @rts@. Versions are confined to the
--- package sub-system.
---
--- This means that in theory you could have multiple base packages installed
--- (for example), and switch between them using @-package@\/@-hide-package@.
---
--- A 'PackageId' is a string of the form @-@.
+-- Mostly the compiler deals in terms of 'PackageKey's, which are md5 hashes
+-- of a package ID, keys of its dependencies, and Cabal flags. You're expected
+-- to pass in the package key in the @-this-package-key@ flag. However, for
+-- wired-in packages like @base@ & @rts@, we don't necessarily know what the
+-- version is, so these are handled specially; see #wired_in_packages#.
--- | Turn a Cabal 'PackageIdentifier' into a GHC 'PackageId'
-mkPackageId :: PackageIdentifier -> PackageId
-mkPackageId = stringToPackageId . display
+-- | Turn a Cabal 'PackageIdentifier' into a GHC 'PackageKey'
+mkPackageKey :: Cabal.PackageKey -> PackageKey
+mkPackageKey = stringToPackageKey . display
--- | Get the GHC 'PackageId' right out of a Cabalish 'PackageConfig'
-packageConfigId :: PackageConfig -> PackageId
-packageConfigId = mkPackageId . sourcePackageId
+-- | Get the GHC 'PackageKey' right out of a Cabalish 'PackageConfig'
+packageConfigId :: PackageConfig -> PackageKey
+packageConfigId = mkPackageKey . packageKey
-- | Turn a 'PackageConfig', which contains GHC 'Module.ModuleName's into a Cabal specific
-- 'InstalledPackageInfo' which contains Cabal 'Distribution.ModuleName.ModuleName's
packageConfigToInstalledPackageInfo :: PackageConfig -> InstalledPackageInfo
packageConfigToInstalledPackageInfo
(pkgconf@(InstalledPackageInfo { exposedModules = e,
+ reexportedModules = r,
hiddenModules = h })) =
pkgconf{ exposedModules = map convert e,
+ reexportedModules = map (fmap convert) r,
hiddenModules = map convert h }
where convert :: Module.ModuleName -> Distribution.ModuleName.ModuleName
convert = (expectJust "packageConfigToInstalledPackageInfo") . simpleParse . moduleNameString
@@ -80,7 +80,9 @@ packageConfigToInstalledPackageInfo
installedPackageInfoToPackageConfig :: InstalledPackageInfo_ String -> PackageConfig
installedPackageInfoToPackageConfig
(pkgconf@(InstalledPackageInfo { exposedModules = e,
+ reexportedModules = r,
hiddenModules = h })) =
pkgconf{ exposedModules = map mkModuleName e,
+ reexportedModules = map (fmap mkModuleName) r,
hiddenModules = map mkModuleName h }
diff --git a/compiler/main/Packages.lhs b/compiler/main/Packages.lhs
index aefb5360edb5..78c8059046d5 100644
--- a/compiler/main/Packages.lhs
+++ b/compiler/main/Packages.lhs
@@ -2,19 +2,29 @@
% (c) The University of Glasgow, 2006
%
\begin{code}
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
+
-- | Package manipulation
module Packages (
module PackageConfig,
- -- * The PackageConfigMap
- PackageConfigMap, emptyPackageConfigMap, lookupPackage,
- extendPackageConfigMap, dumpPackages,
-
-- * Reading the package config, and processing cmdline args
- PackageState(..),
+ PackageState(preloadPackages),
initPackages,
+
+ -- * Querying the package config
+ lookupPackage,
+ resolveInstalledPackageId,
+ searchPackageId,
+ dumpPackages,
+ simpleDumpPackages,
getPackageDetails,
- lookupModuleInAllPackages, lookupModuleWithSuggestions,
+ listVisibleModuleNames,
+ lookupModuleInAllPackages,
+ lookupModuleWithSuggestions,
+ LookupResult(..),
+ ModuleSuggestion(..),
+ ModuleOrigin(..),
-- * Inspecting the set of packages in scope
getPackageIncludePath,
@@ -27,8 +37,12 @@ module Packages (
collectIncludeDirs, collectLibraryPaths, collectLinkOpts,
packageHsLibs,
+ ModuleExport(..),
-- * Utils
+ packageKeyPackageIdString,
+ pprFlag,
+ pprModuleMap,
isDllName
)
where
@@ -49,10 +63,12 @@ import Maybes
import System.Environment ( getEnv )
import Distribution.InstalledPackageInfo
import Distribution.InstalledPackageInfo.Binary
-import Distribution.Package hiding (PackageId,depends)
+import Distribution.Package hiding (depends, PackageKey, mkPackageKey)
+import Distribution.ModuleExport
import FastString
import ErrUtils ( debugTraceMsg, putMsg, MsgDoc )
import Exception
+import Unique
import System.Directory
import System.FilePath as FilePath
@@ -61,6 +77,7 @@ import Control.Monad
import Data.Char (isSpace)
import Data.List as List
import Data.Map (Map)
+import Data.Monoid hiding ((<>))
import qualified Data.Map as Map
import qualified FiniteMap as Map
import qualified Data.Set as Set
@@ -73,12 +90,18 @@ import qualified Data.Set as Set
-- provide.
--
-- The package state is computed by 'initPackages', and kept in DynFlags.
+-- It is influenced by various package flags:
--
--- * @-package @ causes @@ to become exposed, and all other packages
--- with the same name to become hidden.
+-- * @-package @ and @-package-id @ cause @@ to become exposed.
+-- If @-hide-all-packages@ was not specified, these commands also cause
+-- all other packages with the same name to become hidden.
--
-- * @-hide-package @ causes @@ to become hidden.
--
+-- * (there are a few more flags, check below for their semantics)
+--
+-- The package state has the following properties.
+--
-- * Let @exposedPackages@ be the set of packages thus exposed.
-- Let @depExposedPackages@ be the transitive closure from @exposedPackages@ of
-- their dependencies.
@@ -107,39 +130,166 @@ import qualified Data.Set as Set
-- When compiling A, we record in B's Module value whether it's
-- in a different DLL, by setting the DLL flag.
-data PackageState = PackageState {
- pkgIdMap :: PackageConfigMap, -- PackageId -> PackageConfig
- -- The exposed flags are adjusted according to -package and
- -- -hide-package flags, and -ignore-package removes packages.
-
- preloadPackages :: [PackageId],
- -- The packages we're going to link in eagerly. This list
- -- should be in reverse dependency order; that is, a package
- -- is always mentioned before the packages it depends on.
-
- moduleToPkgConfAll :: UniqFM [(PackageConfig,Bool)], -- ModuleEnv mapping
- -- Derived from pkgIdMap.
- -- Maps Module to (pkgconf,exposed), where pkgconf is the
- -- PackageConfig for the package containing the module, and
- -- exposed is True if the package exposes that module.
+-- | Given a module name, there may be multiple ways it came into scope,
+-- possibly simultaneously. This data type tracks all the possible ways
+-- it could have come into scope. Warning: don't use the record functions,
+-- they're partial!
+data ModuleOrigin =
+ -- | Module is hidden, and thus never will be available for import.
+ -- (But maybe the user didn't realize, so we'll still keep track
+ -- of these modules.)
+ ModHidden
+ -- | Module is public, and could have come from some places.
+ | ModOrigin {
+ -- | @Just False@ means that this module is in
+ -- someone's @exported-modules@ list, but that package is hidden;
+ -- @Just True@ means that it is available; @Nothing@ means neither
+ -- applies.
+ fromOrigPackage :: Maybe Bool
+ -- | Is the module available from a reexport of an exposed package?
+ -- There could be multiple.
+ , fromExposedReexport :: [PackageConfig]
+ -- | Is the module available from a reexport of a hidden package?
+ , fromHiddenReexport :: [PackageConfig]
+ -- | Did the module export come from a package flag? (ToDo: track
+ -- more information.)
+ , fromPackageFlag :: Bool
+ }
+
+instance Outputable ModuleOrigin where
+ ppr ModHidden = text "hidden module"
+ ppr (ModOrigin e res rhs f) = sep (punctuate comma (
+ (case e of
+ Nothing -> []
+ Just False -> [text "hidden package"]
+ Just True -> [text "exposed package"]) ++
+ (if null res
+ then []
+ else [text "reexport by" <+>
+ sep (map (ppr . packageConfigId) res)]) ++
+ (if null rhs
+ then []
+ else [text "hidden reexport by" <+>
+ sep (map (ppr . packageConfigId) rhs)]) ++
+ (if f then [text "package flag"] else [])
+ ))
+
+-- | Smart constructor for a module which is in @exposed-modules@. Takes
+-- as an argument whether or not the defining package is exposed.
+fromExposedModules :: Bool -> ModuleOrigin
+fromExposedModules e = ModOrigin (Just e) [] [] False
+
+-- | Smart constructor for a module which is in @reexported-modules@. Takes
+ -- as an argument whether or not the reexporting package is exposed, and
+-- also its 'PackageConfig'.
+fromReexportedModules :: Bool -> PackageConfig -> ModuleOrigin
+fromReexportedModules True pkg = ModOrigin Nothing [pkg] [] False
+fromReexportedModules False pkg = ModOrigin Nothing [] [pkg] False
+
+-- | Smart constructor for a module which was bound by a package flag.
+fromFlag :: ModuleOrigin
+fromFlag = ModOrigin Nothing [] [] True
+
+instance Monoid ModuleOrigin where
+ mempty = ModOrigin Nothing [] [] False
+ mappend (ModOrigin e res rhs f) (ModOrigin e' res' rhs' f') =
+ ModOrigin (g e e') (res ++ res') (rhs ++ rhs') (f || f')
+ where g (Just b) (Just b')
+ | b == b' = Just b
+ | otherwise = panic "ModOrigin: package both exposed/hidden"
+ g Nothing x = x
+ g x Nothing = x
+ mappend _ _ = panic "ModOrigin: hidden module redefined"
+
+-- | Is the name from the import actually visible? (i.e. does it cause
+-- ambiguity, or is it only relevant when we're making suggestions?)
+originVisible :: ModuleOrigin -> Bool
+originVisible ModHidden = False
+originVisible (ModOrigin b res _ f) = b == Just True || not (null res) || f
+
+-- | Are there actually no providers for this module? This will never occur
+-- except when we're filtering based on package imports.
+originEmpty :: ModuleOrigin -> Bool
+originEmpty (ModOrigin Nothing [] [] False) = True
+originEmpty _ = False
+
+-- | When we do a plain lookup (e.g. for an import), initially, all we want
+-- to know is if we can find it or not (and if we do and it's a reexport,
+-- what the real name is). If the find fails, we'll want to investigate more
+-- to give a good error message.
+data SimpleModuleConf =
+ SModConf Module PackageConfig ModuleOrigin
+ | SModConfAmbiguous
+
+-- | 'UniqFM' map from 'ModuleName'
+type ModuleNameMap = UniqFM
+
+-- | 'UniqFM' map from 'PackageKey'
+type PackageKeyMap = UniqFM
+
+-- | 'UniqFM' map from 'PackageKey' to 'PackageConfig'
+type PackageConfigMap = PackageKeyMap PackageConfig
+
+-- | 'UniqFM' map from 'PackageKey' to (1) whether or not all modules which
+-- are exposed should be dumped into scope, (2) any custom renamings that
+ -- should also be applied, and (3) what package name is associated with the
+-- key, if it might be hidden
+type VisibilityMap =
+ PackageKeyMap (Bool, [(ModuleName, ModuleName)], FastString)
+
+-- | Map from 'ModuleName' to 'Module' to all the origins of the bindings
+ -- in scope. The 'PackageConfig' is not cached, mostly for convenience reasons
+-- (since this is the slow path, we'll just look it up again).
+type ModuleToPkgConfAll =
+ Map ModuleName (Map Module ModuleOrigin)
+data PackageState = PackageState {
+ -- | A mapping of 'PackageKey' to 'PackageConfig'. This list is adjusted
+ -- so that only valid packages are here. Currently, we also flip the
+ -- exposed/trusted bits based on package flags; however, the hope is to
+ -- stop doing that.
+ pkgIdMap :: PackageConfigMap,
+
+ -- | The packages we're going to link in eagerly. This list
+ -- should be in reverse dependency order; that is, a package
+ -- is always mentioned before the packages it depends on.
+ preloadPackages :: [PackageKey],
+
+ -- | This is a simplified map from 'ModuleName' to original 'Module' and
+ -- package configuration providing it.
+ moduleToPkgConf :: ModuleNameMap SimpleModuleConf,
+
+ -- | This is a full map from 'ModuleName' to all modules which may possibly
+ -- be providing it. These providers may be hidden (but we'll still want
+ -- to report them in error messages), or it may be an ambiguous import.
+ moduleToPkgConfAll :: ModuleToPkgConfAll,
+
+ -- | This is a map from 'InstalledPackageId' to 'PackageKey', since GHC
+ -- internally deals in package keys but the database may refer to installed
+ -- package IDs.
installedPackageIdMap :: InstalledPackageIdMap
}
--- | A PackageConfigMap maps a 'PackageId' to a 'PackageConfig'
-type PackageConfigMap = UniqFM PackageConfig
-
-type InstalledPackageIdMap = Map InstalledPackageId PackageId
-
+type InstalledPackageIdMap = Map InstalledPackageId PackageKey
type InstalledPackageIndex = Map InstalledPackageId PackageConfig
+-- | Empty package configuration map
emptyPackageConfigMap :: PackageConfigMap
emptyPackageConfigMap = emptyUFM
--- | Find the package we know about with the given id (e.g. \"foo-1.0\"), if any
-lookupPackage :: PackageConfigMap -> PackageId -> Maybe PackageConfig
-lookupPackage = lookupUFM
+-- | Find the package we know about with the given key (e.g. @foo_HASH@), if any
+lookupPackage :: DynFlags -> PackageKey -> Maybe PackageConfig
+lookupPackage dflags = lookupPackage' (pkgIdMap (pkgState dflags))
+
+lookupPackage' :: PackageConfigMap -> PackageKey -> Maybe PackageConfig
+lookupPackage' = lookupUFM
+-- | Search for packages with a given package ID (e.g. \"foo-0.1\")
+searchPackageId :: DynFlags -> PackageId -> [PackageConfig]
+searchPackageId dflags pid = filter ((pid ==) . sourcePackageId)
+ (listPackageConfigMap dflags)
+
+-- | Extends the package configuration map with a list of package configs.
extendPackageConfigMap
:: PackageConfigMap -> [PackageConfig] -> PackageConfigMap
extendPackageConfigMap pkg_map new_pkgs
@@ -148,8 +298,20 @@ extendPackageConfigMap pkg_map new_pkgs
-- | Looks up the package with the given id in the package state, panicing if it is
-- not found
-getPackageDetails :: PackageState -> PackageId -> PackageConfig
-getPackageDetails ps pid = expectJust "getPackageDetails" (lookupPackage (pkgIdMap ps) pid)
+getPackageDetails :: DynFlags -> PackageKey -> PackageConfig
+getPackageDetails dflags pid =
+ expectJust "getPackageDetails" (lookupPackage dflags pid)
+
+-- | Get a list of entries from the package database. NB: be careful with
+-- this function, it may not do what you expect it to.
+listPackageConfigMap :: DynFlags -> [PackageConfig]
+listPackageConfigMap dflags = eltsUFM (pkgIdMap (pkgState dflags))
+
+-- | Looks up a 'PackageKey' given an 'InstalledPackageId'
+resolveInstalledPackageId :: DynFlags -> InstalledPackageId -> PackageKey
+resolveInstalledPackageId dflags ipid =
+ expectJust "resolveInstalledPackageId"
+ (Map.lookup ipid (installedPackageIdMap (pkgState dflags)))
-- ----------------------------------------------------------------------------
-- Loading the package db files and building up the package state
@@ -167,7 +329,7 @@ getPackageDetails ps pid = expectJust "getPackageDetails" (lookupPackage (pkgIdM
-- 'packageFlags' field of the 'DynFlags', and it will update the
-- 'pkgState' in 'DynFlags' and return a list of packages to
-- link in.
-initPackages :: DynFlags -> IO (DynFlags, [PackageId])
+initPackages :: DynFlags -> IO (DynFlags, [PackageKey])
initPackages dflags = do
pkg_db <- case pkgDatabase dflags of
Nothing -> readPackageConfigs dflags
@@ -249,17 +411,12 @@ readPackageConfig dflags conf_file = do
return pkg_configs2
setBatchPackageFlags :: DynFlags -> [PackageConfig] -> [PackageConfig]
-setBatchPackageFlags dflags pkgs = (maybeDistrustAll . maybeHideAll) pkgs
+setBatchPackageFlags dflags pkgs = maybeDistrustAll pkgs
where
- maybeHideAll pkgs'
- | gopt Opt_HideAllPackages dflags = map hide pkgs'
- | otherwise = pkgs'
-
maybeDistrustAll pkgs'
| gopt Opt_DistrustAllPackages dflags = map distrust pkgs'
| otherwise = pkgs'
- hide pkg = pkg{ exposed = False }
distrust pkg = pkg{ trusted = False }
-- TODO: This code is duplicated in utils/ghc-pkg/Main.hs
@@ -316,75 +473,88 @@ mungePackagePaths top_dir pkgroot pkg =
-- Modify our copy of the package database based on a package flag
-- (-package, -hide-package, -ignore-package).
+-- | A horrible hack, the problem is the package key we'll turn
+-- up here is going to get edited when we select the wired in
+-- packages, so preemptively pick up the right one. Also, this elem
+-- test is slow. The alternative is to change wired in packages first, but
+-- then we are no longer able to match against package keys e.g. from when
+-- a user passes in a package flag.
+calcKey :: PackageConfig -> PackageKey
+calcKey p | pk <- display (pkgName (sourcePackageId p))
+ , pk `elem` wired_in_pkgids
+ = stringToPackageKey pk
+ | otherwise = packageConfigId p
+
applyPackageFlag
:: DynFlags
-> UnusablePackages
- -> [PackageConfig] -- Initial database
+ -> ([PackageConfig], VisibilityMap) -- Initial database
-> PackageFlag -- flag to apply
- -> IO [PackageConfig] -- new database
+ -> IO ([PackageConfig], VisibilityMap) -- new database
-applyPackageFlag dflags unusable pkgs flag =
- case flag of
- ExposePackage str ->
- case selectPackages (matchingStr str) pkgs unusable of
- Left ps -> packageFlagErr dflags flag ps
- Right (p:ps,qs) -> return (p':ps')
- where p' = p {exposed=True}
- ps' = hideAll (pkgName (sourcePackageId p)) (ps++qs)
- _ -> panic "applyPackageFlag"
+-- ToDo: Unfortunately, we still have to plumb the package config through,
+-- because Safe Haskell trust is still implemented by modifying the database.
+-- Eventually, track that separately and then axe @[PackageConfig]@ from
+-- this fold entirely
- ExposePackageId str ->
- case selectPackages (matchingId str) pkgs unusable of
+applyPackageFlag dflags unusable (pkgs, vm) flag =
+ case flag of
+ ExposePackage arg m_rns ->
+ case selectPackages (matching arg) pkgs unusable of
Left ps -> packageFlagErr dflags flag ps
- Right (p:ps,qs) -> return (p':ps')
- where p' = p {exposed=True}
- ps' = hideAll (pkgName (sourcePackageId p)) (ps++qs)
+ Right (p:_,_) -> return (pkgs, vm')
+ where
+ n = fsPackageName p
+ vm' = addToUFM_C edit vm_cleared (calcKey p)
+ (case m_rns of
+ Nothing -> (True, [], n)
+ Just rns' -> (False, map convRn rns', n))
+ edit (b, rns, n) (b', rns', _) = (b || b', rns ++ rns', n)
+ convRn (a,b) = (mkModuleName a, mkModuleName b)
+ -- ToDo: ATM, -hide-all-packages implicitly triggers change in
+ -- behavior, maybe eventually make it toggleable with a separate
+ -- flag
+ vm_cleared | gopt Opt_HideAllPackages dflags = vm
+ -- NB: -package foo-0.1 (Foo as Foo1) does NOT hide
+ -- other versions of foo. Presence of renaming means
+ -- user probably wanted both.
+ | Just _ <- m_rns = vm
+ | otherwise = filterUFM_Directly
+ (\k (_,_,n') -> k == getUnique (calcKey p)
+ || n /= n') vm
_ -> panic "applyPackageFlag"
HidePackage str ->
case selectPackages (matchingStr str) pkgs unusable of
Left ps -> packageFlagErr dflags flag ps
- Right (ps,qs) -> return (map hide ps ++ qs)
- where hide p = p {exposed=False}
+ Right (ps,_) -> return (pkgs, vm')
+ where vm' = delListFromUFM vm (map calcKey ps)
-- we trust all matching packages. Maybe should only trust first one?
-- and leave others the same or set them untrusted
TrustPackage str ->
case selectPackages (matchingStr str) pkgs unusable of
Left ps -> packageFlagErr dflags flag ps
- Right (ps,qs) -> return (map trust ps ++ qs)
+ Right (ps,qs) -> return (map trust ps ++ qs, vm)
where trust p = p {trusted=True}
DistrustPackage str ->
case selectPackages (matchingStr str) pkgs unusable of
Left ps -> packageFlagErr dflags flag ps
- Right (ps,qs) -> return (map distrust ps ++ qs)
+ Right (ps,qs) -> return (map distrust ps ++ qs, vm)
where distrust p = p {trusted=False}
- _ -> panic "applyPackageFlag"
-
- where
- -- When a package is requested to be exposed, we hide all other
- -- packages with the same name.
- hideAll name ps = map maybe_hide ps
- where maybe_hide p
- | pkgName (sourcePackageId p) == name = p {exposed=False}
- | otherwise = p
-
+ IgnorePackage _ -> panic "applyPackageFlag: IgnorePackage"
selectPackages :: (PackageConfig -> Bool) -> [PackageConfig]
-> UnusablePackages
-> Either [(PackageConfig, UnusablePackageReason)]
([PackageConfig], [PackageConfig])
selectPackages matches pkgs unusable
- = let
- (ps,rest) = partition matches pkgs
- reasons = [ (p, Map.lookup (installedPackageId p) unusable)
- | p <- ps ]
- in
- if all (isJust.snd) reasons
- then Left [ (p, reason) | (p,Just reason) <- reasons ]
- else Right (sortByVersion [ p | (p,Nothing) <- reasons ], rest)
+ = let (ps,rest) = partition matches pkgs
+ in if null ps
+ then Left (filter (matches.fst) (Map.elems unusable))
+ else Right (sortByVersion ps, rest)
-- A package named on the command line can either include the
-- version, or just the name if it is unambiguous.
@@ -396,6 +566,14 @@ matchingStr str p
matchingId :: String -> PackageConfig -> Bool
matchingId str p = InstalledPackageId str == installedPackageId p
+matchingKey :: String -> PackageConfig -> Bool
+matchingKey str p = str == display (packageKey p)
+
+matching :: PackageArg -> PackageConfig -> Bool
+matching (PackageArg str) = matchingStr str
+matching (PackageIdArg str) = matchingId str
+matching (PackageKeyArg str) = matchingKey str
+
sortByVersion :: [InstalledPackageInfo_ m] -> [InstalledPackageInfo_ m]
sortByVersion = sortBy (flip (comparing (pkgVersion.sourcePackageId)))
@@ -409,7 +587,8 @@ packageFlagErr :: DynFlags
-- for missing DPH package we emit a more helpful error message, because
-- this may be the result of using -fdph-par or -fdph-seq.
-packageFlagErr dflags (ExposePackage pkg) [] | is_dph_package pkg
+packageFlagErr dflags (ExposePackage (PackageArg pkg) _) []
+ | is_dph_package pkg
= throwGhcExceptionIO (CmdLineError (showSDoc dflags $ dph_err))
where dph_err = text "the " <> text pkg <> text " package is not installed."
$$ text "To install it: \"cabal install dph\"."
@@ -417,50 +596,37 @@ packageFlagErr dflags (ExposePackage pkg) [] | is_dph_package pkg
packageFlagErr dflags flag reasons
= throwGhcExceptionIO (CmdLineError (showSDoc dflags $ err))
- where err = text "cannot satisfy " <> ppr_flag <>
+ where err = text "cannot satisfy " <> pprFlag flag <>
(if null reasons then empty else text ": ") $$
nest 4 (ppr_reasons $$
+ -- ToDo: this admonition seems a bit dodgy
text "(use -v for more information)")
- ppr_flag = case flag of
- IgnorePackage p -> text "-ignore-package " <> text p
- HidePackage p -> text "-hide-package " <> text p
- ExposePackage p -> text "-package " <> text p
- ExposePackageId p -> text "-package-id " <> text p
- TrustPackage p -> text "-trust " <> text p
- DistrustPackage p -> text "-distrust " <> text p
ppr_reasons = vcat (map ppr_reason reasons)
ppr_reason (p, reason) = pprReason (pprIPkg p <+> text "is") reason
--- -----------------------------------------------------------------------------
--- Hide old versions of packages
-
---
--- hide all packages for which there is also a later version
--- that is already exposed. This just makes it non-fatal to have two
--- versions of a package exposed, which can happen if you install a
--- later version of a package in the user database, for example.
---
-hideOldPackages :: DynFlags -> [PackageConfig] -> IO [PackageConfig]
-hideOldPackages dflags pkgs = mapM maybe_hide pkgs
- where maybe_hide p
- | not (exposed p) = return p
- | (p' : _) <- later_versions = do
- debugTraceMsg dflags 2 $
- (ptext (sLit "hiding package") <+> pprSPkg p <+>
- ptext (sLit "to avoid conflict with later version") <+>
- pprSPkg p')
- return (p {exposed=False})
- | otherwise = return p
- where myname = pkgName (sourcePackageId p)
- myversion = pkgVersion (sourcePackageId p)
- later_versions = [ p | p <- pkgs, exposed p,
- let pkg = sourcePackageId p,
- pkgName pkg == myname,
- pkgVersion pkg > myversion ]
+pprFlag :: PackageFlag -> SDoc
+pprFlag flag = case flag of
+ IgnorePackage p -> text "-ignore-package " <> text p
+ HidePackage p -> text "-hide-package " <> text p
+ ExposePackage a rns -> ppr_arg a <> ppr_rns rns
+ TrustPackage p -> text "-trust " <> text p
+ DistrustPackage p -> text "-distrust " <> text p
+ where ppr_arg arg = case arg of
+ PackageArg p -> text "-package " <> text p
+ PackageIdArg p -> text "-package-id " <> text p
+ PackageKeyArg p -> text "-package-key " <> text p
+ ppr_rns Nothing = empty
+ ppr_rns (Just rns) = char '(' <> hsep (punctuate comma (map ppr_rn rns))
+ <> char ')'
+ ppr_rn (orig, new) | orig == new = text orig
+ | otherwise = text orig <+> text "as" <+> text new
-- -----------------------------------------------------------------------------
-- Wired-in packages
+wired_in_pkgids :: [String]
+wired_in_pkgids = map packageKeyString wiredInPackageKeys
+
findWiredInPackages
:: DynFlags
-> [PackageConfig] -- database
@@ -472,16 +638,6 @@ findWiredInPackages dflags pkgs = do
-- their canonical names (eg. base-1.0 ==> base).
--
let
- wired_in_pkgids :: [String]
- wired_in_pkgids = map packageIdString
- [ primPackageId,
- integerPackageId,
- basePackageId,
- rtsPackageId,
- thPackageId,
- dphSeqPackageId,
- dphParPackageId ]
-
matches :: PackageConfig -> String -> Bool
pc `matches` pid = display (pkgName (sourcePackageId pc)) == pid
@@ -491,9 +647,10 @@ findWiredInPackages dflags pkgs = do
-- one.
--
-- When choosing which package to map to a wired-in package
- -- name, we prefer exposed packages, and pick the latest
- -- version. To override the default choice, -hide-package
- -- could be used to hide newer versions.
+ -- name, we pick the latest version (modern Cabal makes it difficult
+ -- to install multiple versions of wired-in packages, however!)
+ -- To override the default choice, -ignore-package could be used to
+ -- hide newer versions.
--
findWiredInPackage :: [PackageConfig] -> String
-> IO (Maybe InstalledPackageId)
@@ -540,7 +697,9 @@ findWiredInPackages dflags pkgs = do
updateWiredInDependencies pkgs = map upd_pkg pkgs
where upd_pkg p
| installedPackageId p `elem` wired_in_ids
- = p { sourcePackageId = (sourcePackageId p){ pkgVersion = Version [] [] } }
+ = let pid = (sourcePackageId p) { pkgVersion = Version [] [] }
+ in p { sourcePackageId = pid
+ , packageKey = OldPackageKey pid }
| otherwise
= p
@@ -553,7 +712,8 @@ data UnusablePackageReason
| MissingDependencies [InstalledPackageId]
| ShadowedBy InstalledPackageId
-type UnusablePackages = Map InstalledPackageId UnusablePackageReason
+type UnusablePackages = Map InstalledPackageId
+ (PackageConfig, UnusablePackageReason)
pprReason :: SDoc -> UnusablePackageReason -> SDoc
pprReason pref reason = case reason of
@@ -569,7 +729,7 @@ pprReason pref reason = case reason of
reportUnusable :: DynFlags -> UnusablePackages -> IO ()
reportUnusable dflags pkgs = mapM_ report (Map.toList pkgs)
where
- report (ipid, reason) =
+ report (ipid, (_, reason)) =
debugTraceMsg dflags 2 $
pprReason
(ptext (sLit "package") <+>
@@ -589,7 +749,7 @@ findBroken pkgs = go [] Map.empty pkgs
go avail ipids not_avail =
case partitionWith (depsAvailable ipids) not_avail of
([], not_avail) ->
- Map.fromList [ (installedPackageId p, MissingDependencies deps)
+ Map.fromList [ (installedPackageId p, (p, MissingDependencies deps))
| (p,deps) <- not_avail ]
(new_avail, not_avail) ->
go (new_avail ++ avail) new_ipids (map fst not_avail)
@@ -618,19 +778,20 @@ shadowPackages pkgs preferred
in Map.fromList shadowed
where
check (shadowed,pkgmap) pkg
- | Just oldpkg <- lookupUFM pkgmap (packageConfigId pkg)
+ | Just oldpkg <- lookupUFM pkgmap pkgid
, let
ipid_new = installedPackageId pkg
ipid_old = installedPackageId oldpkg
--
, ipid_old /= ipid_new
= if ipid_old `elem` preferred
- then ( (ipid_new, ShadowedBy ipid_old) : shadowed, pkgmap )
- else ( (ipid_old, ShadowedBy ipid_new) : shadowed, pkgmap' )
+ then ((ipid_new, (pkg, ShadowedBy ipid_old)) : shadowed, pkgmap)
+ else ((ipid_old, (oldpkg, ShadowedBy ipid_new)) : shadowed, pkgmap')
| otherwise
= (shadowed, pkgmap')
where
- pkgmap' = addToUFM pkgmap (packageConfigId pkg) pkg
+ pkgid = mkFastString (display (sourcePackageId pkg))
+ pkgmap' = addToUFM pkgmap pkgid pkg
-- -----------------------------------------------------------------------------
@@ -639,7 +800,7 @@ ignorePackages flags pkgs = Map.fromList (concatMap doit flags)
where
doit (IgnorePackage str) =
case partition (matchingStr str) pkgs of
- (ps, _) -> [ (installedPackageId p, IgnoredWithFlag)
+ (ps, _) -> [ (installedPackageId p, (p, IgnoredWithFlag))
| p <- ps ]
-- missing package is not an error for -ignore-package,
-- because a common usage is to -ignore-package P as
@@ -667,11 +828,11 @@ depClosure index ipids = closure Map.empty ipids
mkPackageState
:: DynFlags
-> [PackageConfig] -- initial database
- -> [PackageId] -- preloaded packages
- -> PackageId -- this package
+ -> [PackageKey] -- preloaded packages
+ -> PackageKey -- this package
-> IO (PackageState,
- [PackageId], -- new packages to preload
- PackageId) -- this package, might be modified if the current
+ [PackageKey], -- new packages to preload
+ PackageKey) -- this package, might be modified if the current
-- package is a wired-in package.
mkPackageState dflags pkgs0 preload0 this_package = do
@@ -682,12 +843,12 @@ mkPackageState dflags pkgs0 preload0 this_package = do
1. P = transitive closure of packages selected by -package-id
2. Apply shadowing. When there are multiple packages with the same
- sourcePackageId,
+ packageKey,
* if one is in P, use that one
* otherwise, use the one highest in the package stack
[
- rationale: we cannot use two packages with the same sourcePackageId
- in the same program, because sourcePackageId is the symbol prefix.
+ rationale: we cannot use two packages with the same packageKey
+ in the same program, because packageKey is the symbol prefix.
Hence we must select a consistent set of packages to use. We have
a default algorithm for doing this: packages higher in the stack
shadow those lower down. This default algorithm can be overriden
@@ -735,30 +896,64 @@ mkPackageState dflags pkgs0 preload0 this_package = do
ipid_map = Map.fromList [ (installedPackageId p, p) | p <- pkgs0 ]
- ipid_selected = depClosure ipid_map [ InstalledPackageId i
- | ExposePackageId i <- flags ]
+ ipid_selected = depClosure ipid_map
+ [ InstalledPackageId i
+ | ExposePackage (PackageIdArg i) _ <- flags ]
(ignore_flags, other_flags) = partition is_ignore flags
is_ignore IgnorePackage{} = True
is_ignore _ = False
shadowed = shadowPackages pkgs0_unique ipid_selected
-
ignored = ignorePackages ignore_flags pkgs0_unique
- pkgs0' = filter (not . (`Map.member` (Map.union shadowed ignored)) . installedPackageId) pkgs0_unique
+ isBroken = (`Map.member` (Map.union shadowed ignored)).installedPackageId
+ pkgs0' = filter (not . isBroken) pkgs0_unique
+
broken = findBroken pkgs0'
+
unusable = shadowed `Map.union` ignored `Map.union` broken
+ pkgs1 = filter (not . (`Map.member` unusable) . installedPackageId) pkgs0'
reportUnusable dflags unusable
+ --
+ -- Calculate the initial set of packages, prior to any package flags.
+ -- This set contains the latest version of all valid (not unusable) packages,
+ -- or is empty if we have -hide-all-packages
+ --
+ let preferLater pkg pkg' =
+ case comparing (pkgVersion.sourcePackageId) pkg pkg' of
+ GT -> pkg
+ _ -> pkg'
+ calcInitial m pkg = addToUFM_C preferLater m (fsPackageName pkg) pkg
+ initial = if gopt Opt_HideAllPackages dflags
+ then emptyUFM
+ else foldl' calcInitial emptyUFM pkgs1
+ vis_map0 = foldUFM (\p vm ->
+ if exposed p
+ then addToUFM vm (calcKey p)
+ (True, [], fsPackageName p)
+ else vm)
+ emptyUFM initial
+
--
-- Modify the package database according to the command-line flags
-- (-package, -hide-package, -ignore-package, -hide-all-packages).
+ -- This needs to know about the unusable packages, since if a user tries
+ -- to enable an unusable package, we should let them know.
--
- pkgs1 <- foldM (applyPackageFlag dflags unusable) pkgs0_unique other_flags
- let pkgs2 = filter (not . (`Map.member` unusable) . installedPackageId) pkgs1
+ (pkgs2, vis_map) <- foldM (applyPackageFlag dflags unusable)
+ (pkgs1, vis_map0) other_flags
+ --
+ -- Sort out which packages are wired in. This has to be done last, since
+ -- it modifies the package keys of wired in packages, but when we process
+ -- package arguments we need to key against the old versions.
+ --
+ pkgs3 <- findWiredInPackages dflags pkgs2
+
+ --
-- Here we build up a set of the packages mentioned in -package
-- flags on the command line; these are called the "preload"
-- packages. we link these packages in eagerly. The preload set
@@ -767,22 +962,15 @@ mkPackageState dflags pkgs0 preload0 this_package = do
--
let preload1 = [ installedPackageId p | f <- flags, p <- get_exposed f ]
- get_exposed (ExposePackage s)
- = take 1 $ sortByVersion (filter (matchingStr s) pkgs2)
- -- -package P means "the latest version of P" (#7030)
- get_exposed (ExposePackageId s) = filter (matchingId s) pkgs2
- get_exposed _ = []
+ get_exposed (ExposePackage a _) = take 1 . sortByVersion
+ . filter (matching a)
+ $ pkgs2
+ get_exposed _ = []
- -- hide packages that are subsumed by later versions
- pkgs3 <- hideOldPackages dflags pkgs2
-
- -- sort out which packages are wired in
- pkgs4 <- findWiredInPackages dflags pkgs3
-
- let pkg_db = extendPackageConfigMap emptyPackageConfigMap pkgs4
+ let pkg_db = extendPackageConfigMap emptyPackageConfigMap pkgs3
ipid_map = Map.fromList [ (installedPackageId p, packageConfigId p)
- | p <- pkgs4 ]
+ | p <- pkgs3 ]
lookupIPID ipid@(InstalledPackageId str)
| Just pid <- Map.lookup ipid ipid_map = return pid
@@ -794,7 +982,8 @@ mkPackageState dflags pkgs0 preload0 this_package = do
-- add base & rts to the preload packages
basicLinkedPackages
| gopt Opt_AutoLinkPackages dflags
- = filter (flip elemUFM pkg_db) [basePackageId, rtsPackageId]
+ = filter (flip elemUFM pkg_db)
+ [basePackageKey, rtsPackageKey]
| otherwise = []
-- but in any case remove the current package from the set of
-- preloaded packages so that base/rts does not end up in the
@@ -806,36 +995,118 @@ mkPackageState dflags pkgs0 preload0 this_package = do
dep_preload <- closeDeps dflags pkg_db ipid_map (zip preload3 (repeat Nothing))
let new_dep_preload = filter (`notElem` preload0) dep_preload
- let pstate = PackageState{ preloadPackages = dep_preload,
- pkgIdMap = pkg_db,
- moduleToPkgConfAll = mkModuleMap pkg_db,
- installedPackageIdMap = ipid_map
- }
-
+ let pstate = PackageState{
+ preloadPackages = dep_preload,
+ pkgIdMap = pkg_db,
+ moduleToPkgConf = mkModuleToPkgConf dflags pkg_db ipid_map vis_map,
+ moduleToPkgConfAll = mkModuleToPkgConfAll dflags pkg_db ipid_map vis_map,
+ installedPackageIdMap = ipid_map
+ }
return (pstate, new_dep_preload, this_package)
-- -----------------------------------------------------------------------------
--- Make the mapping from module to package info
-
-mkModuleMap
- :: PackageConfigMap
- -> UniqFM [(PackageConfig, Bool)]
-mkModuleMap pkg_db = foldr extend_modmap emptyUFM pkgids
- where
- pkgids = map packageConfigId (eltsUFM pkg_db)
-
- extend_modmap pkgid modmap =
- addListToUFM_C (++) modmap
- ([(m, [(pkg, True)]) | m <- exposed_mods] ++
- [(m, [(pkg, False)]) | m <- hidden_mods])
- where
- pkg = expectJust "mkModuleMap" (lookupPackage pkg_db pkgid)
- exposed_mods = exposedModules pkg
- hidden_mods = hiddenModules pkg
-
-pprSPkg :: PackageConfig -> SDoc
-pprSPkg p = text (display (sourcePackageId p))
+-- | Makes the mapping from module to package info
+
+-- | This function is generic; we instantiate it
+mkModuleToPkgConfGeneric
+ :: forall m e.
+ -- Empty map, e.g. the initial state of the output
+ m e
+ -- How to create an entry in the map based on the calculated information
+ -> (PackageKey -> ModuleName -> PackageConfig -> ModuleOrigin -> e)
+ -- How to override the origin of an entry (used for renaming)
+ -> (e -> ModuleOrigin -> e)
+ -- How to incorporate a list of entries into the map
+ -> (m e -> [(ModuleName, e)] -> m e)
+ -- The proper arguments
+ -> DynFlags
+ -> PackageConfigMap
+ -> InstalledPackageIdMap
+ -> VisibilityMap
+ -> m e
+mkModuleToPkgConfGeneric emptyMap sing setOrigins addListTo
+ dflags pkg_db ipid_map vis_map =
+ foldl' extend_modmap emptyMap (eltsUFM pkg_db)
+ where
+ extend_modmap modmap pkg = addListTo modmap theBindings
+ where
+ theBindings :: [(ModuleName, e)]
+ theBindings | Just (b,rns,_) <- lookupUFM vis_map (packageConfigId pkg)
+ = newBindings b rns
+ | otherwise = newBindings False []
+
+ newBindings :: Bool -> [(ModuleName, ModuleName)] -> [(ModuleName, e)]
+ newBindings e rns = es e ++ hiddens ++ map rnBinding rns
+
+ rnBinding :: (ModuleName, ModuleName) -> (ModuleName, e)
+ rnBinding (orig, new) = (new, setOrigins origEntry fromFlag)
+ where origEntry = case lookupUFM esmap orig of
+ Just r -> r
+ Nothing -> throwGhcException (CmdLineError (showSDoc dflags
+ (text "package flag: could not find module name" <+>
+ ppr orig <+> text "in package" <+> ppr pk)))
+
+ es :: Bool -> [(ModuleName, e)]
+ es e =
+ [(m, sing pk m pkg (fromExposedModules e)) | m <- exposed_mods] ++
+ [(m, sing pk' m' pkg' (fromReexportedModules e pkg))
+ | ModuleExport{ exportName = m
+ , exportCachedTrueOrig = Just (ipid', m')} <- reexported_mods
+ , let pk' = expectJust "mkModuleToPkgConf" (Map.lookup ipid' ipid_map)
+ pkg' = pkg_lookup pk' ]
+
+ esmap :: UniqFM e
+ esmap = listToUFM (es False) -- parameter here doesn't matter, orig will
+ -- be overwritten
+
+ hiddens = [(m, sing pk m pkg ModHidden) | m <- hidden_mods]
+
+ pk = packageConfigId pkg
+ pkg_lookup = expectJust "mkModuleToPkgConf" . lookupPackage' pkg_db
+
+ exposed_mods = exposedModules pkg
+ reexported_mods = reexportedModules pkg
+ hidden_mods = hiddenModules pkg
+
+-- | This is a quick and efficient module map, which only contains an entry
+-- if it is specified unambiguously.
+mkModuleToPkgConf
+ :: DynFlags
+ -> PackageConfigMap
+ -> InstalledPackageIdMap
+ -> VisibilityMap
+ -> ModuleNameMap SimpleModuleConf
+mkModuleToPkgConf =
+ mkModuleToPkgConfGeneric emptyMap sing setOrigins addListTo
+ where emptyMap = emptyUFM
+ sing pk m pkg = SModConf (mkModule pk m) pkg
+ -- NB: don't put hidden entries in the map, they're not valid!
+ addListTo m xs = addListToUFM_C merge m (filter isVisible xs)
+ isVisible (_, SModConf _ _ o) = originVisible o
+ isVisible (_, SModConfAmbiguous) = False
+ merge (SModConf m pkg o) (SModConf m' _ o')
+ | m == m' = SModConf m pkg (o `mappend` o')
+ | otherwise = SModConfAmbiguous
+ merge _ _ = SModConfAmbiguous
+ setOrigins (SModConf m pkg _) os = SModConf m pkg os
+ setOrigins SModConfAmbiguous _ = SModConfAmbiguous
+
+-- | This is a slow and complete map, which includes information about
+-- everything, including hidden modules
+mkModuleToPkgConfAll
+ :: DynFlags
+ -> PackageConfigMap
+ -> InstalledPackageIdMap
+ -> VisibilityMap
+ -> ModuleToPkgConfAll
+mkModuleToPkgConfAll =
+ mkModuleToPkgConfGeneric emptyMap sing setOrigins addListTo
+ where emptyMap = Map.empty
+ sing pk m _ = Map.singleton (mkModule pk m)
+ addListTo = foldl' merge
+ merge m (k, v) = Map.insertWith (Map.unionWith mappend) k v m
+ setOrigins m os = fmap (const os) m
pprIPkg :: PackageConfig -> SDoc
pprIPkg p = text (display (installedPackageId p))
@@ -852,7 +1123,7 @@ pprIPkg p = text (display (installedPackageId p))
-- use.
-- | Find all the include directories in these and the preload packages
-getPackageIncludePath :: DynFlags -> [PackageId] -> IO [String]
+getPackageIncludePath :: DynFlags -> [PackageKey] -> IO [String]
getPackageIncludePath dflags pkgs =
collectIncludeDirs `fmap` getPreloadPackagesAnd dflags pkgs
@@ -860,7 +1131,7 @@ collectIncludeDirs :: [PackageConfig] -> [FilePath]
collectIncludeDirs ps = nub (filter notNull (concatMap includeDirs ps))
-- | Find all the library paths in these and the preload packages
-getPackageLibraryPath :: DynFlags -> [PackageId] -> IO [String]
+getPackageLibraryPath :: DynFlags -> [PackageKey] -> IO [String]
getPackageLibraryPath dflags pkgs =
collectLibraryPaths `fmap` getPreloadPackagesAnd dflags pkgs
@@ -869,7 +1140,7 @@ collectLibraryPaths ps = nub (filter notNull (concatMap libraryDirs ps))
-- | Find all the link options in these and the preload packages,
-- returning (package hs lib options, extra library options, other flags)
-getPackageLinkOpts :: DynFlags -> [PackageId] -> IO ([String], [String], [String])
+getPackageLinkOpts :: DynFlags -> [PackageKey] -> IO ([String], [String], [String])
getPackageLinkOpts dflags pkgs =
collectLinkOpts dflags `fmap` getPreloadPackagesAnd dflags pkgs
@@ -917,19 +1188,19 @@ packageHsLibs dflags p = map (mkDynName . addSuffix) (hsLibraries p)
| otherwise = '_':t
-- | Find all the C-compiler options in these and the preload packages
-getPackageExtraCcOpts :: DynFlags -> [PackageId] -> IO [String]
+getPackageExtraCcOpts :: DynFlags -> [PackageKey] -> IO [String]
getPackageExtraCcOpts dflags pkgs = do
ps <- getPreloadPackagesAnd dflags pkgs
return (concatMap ccOptions ps)
-- | Find all the package framework paths in these and the preload packages
-getPackageFrameworkPath :: DynFlags -> [PackageId] -> IO [String]
+getPackageFrameworkPath :: DynFlags -> [PackageKey] -> IO [String]
getPackageFrameworkPath dflags pkgs = do
ps <- getPreloadPackagesAnd dflags pkgs
return (nub (filter notNull (concatMap frameworkDirs ps)))
-- | Find all the package frameworks in these and the preload packages
-getPackageFrameworks :: DynFlags -> [PackageId] -> IO [String]
+getPackageFrameworks :: DynFlags -> [PackageKey] -> IO [String]
getPackageFrameworks dflags pkgs = do
ps <- getPreloadPackagesAnd dflags pkgs
return (concatMap frameworks ps)
@@ -937,41 +1208,114 @@ getPackageFrameworks dflags pkgs = do
-- -----------------------------------------------------------------------------
-- Package Utils
--- | Takes a 'Module', and if the module is in a package returns
--- @(pkgconf, exposed)@ where pkgconf is the PackageConfig for that package,
--- and exposed is @True@ if the package exposes the module.
-lookupModuleInAllPackages :: DynFlags -> ModuleName -> [(PackageConfig,Bool)]
+-- | Takes a 'ModuleName', and if the module is in any package returns
+-- list of modules which take that name.
+lookupModuleInAllPackages :: DynFlags
+ -> ModuleName
+ -> [(Module, PackageConfig)]
lookupModuleInAllPackages dflags m
- = case lookupModuleWithSuggestions dflags m of
- Right pbs -> pbs
- Left _ -> []
-
-lookupModuleWithSuggestions
- :: DynFlags -> ModuleName
- -> Either [Module] [(PackageConfig,Bool)]
- -- Lookup module in all packages
- -- Right pbs => found in pbs
- -- Left ms => not found; but here are sugestions
-lookupModuleWithSuggestions dflags m
- = case lookupUFM (moduleToPkgConfAll pkg_state) m of
- Nothing -> Left suggestions
- Just ps -> Right ps
+ = case lookupModuleWithSuggestions dflags m Nothing of
+ LookupFound a b -> [(a,b)]
+ LookupMultiple rs -> map f rs
+ where f (m,_) = (m, expectJust "lookupModule" (lookupPackage dflags
+ (modulePackageKey m)))
+ _ -> []
+
+-- | The result of performing a lookup
+data LookupResult =
+ -- | Found the module uniquely, nothing else to do
+ LookupFound Module PackageConfig
+ -- | Multiple modules with the same name in scope
+ | LookupMultiple [(Module, ModuleOrigin)]
+ -- | No modules found, but there were some hidden ones with
+ -- an exact name match. First is due to package hidden, second
+ -- is due to module being hidden
+ | LookupHidden [(Module, ModuleOrigin)] [(Module, ModuleOrigin)]
+ -- | Nothing found, here are some suggested different names
+ | LookupNotFound [ModuleSuggestion] -- suggestions
+
+data ModuleSuggestion = SuggestVisible ModuleName Module ModuleOrigin
+ | SuggestHidden ModuleName Module ModuleOrigin
+
+lookupModuleWithSuggestions :: DynFlags
+ -> ModuleName
+ -> Maybe FastString
+ -> LookupResult
+lookupModuleWithSuggestions dflags m mb_pn
+ = case lookupUFM (moduleToPkgConf pkg_state) m of
+ Just (SModConf m pkg o) | matches mb_pn pkg o ->
+ ASSERT( originVisible o ) LookupFound m pkg
+ _ -> case Map.lookup m (moduleToPkgConfAll pkg_state) of
+ Nothing -> LookupNotFound suggestions
+ Just xs ->
+ case foldl' classify ([],[],[]) (Map.toList xs) of
+ ([], [], []) -> LookupNotFound suggestions
+ -- NB: Yes, we have to check this case too, since package qualified
+ -- imports could cause the main lookup to fail due to ambiguity,
+ -- but the second lookup to succeed.
+ (_, _, [(m, _)]) -> LookupFound m (mod_pkg m)
+ (_, _, exposed@(_:_)) -> LookupMultiple exposed
+ (hidden_pkg, hidden_mod, []) -> LookupHidden hidden_pkg hidden_mod
where
+ classify (hidden_pkg, hidden_mod, exposed) (m, origin0) =
+ let origin = filterOrigin mb_pn (mod_pkg m) origin0
+ x = (m, origin)
+ in case origin of
+ ModHidden -> (hidden_pkg, x:hidden_mod, exposed)
+ _ | originEmpty origin -> (hidden_pkg, hidden_mod, exposed)
+ | originVisible origin -> (hidden_pkg, hidden_mod, x:exposed)
+ | otherwise -> (x:hidden_pkg, hidden_mod, exposed)
+
+ pkg_lookup = expectJust "lookupModuleWithSuggestions" . lookupPackage dflags
pkg_state = pkgState dflags
+ mod_pkg = pkg_lookup . modulePackageKey
+
+ matches Nothing _ _ = True -- shortcut for efficiency
+ matches mb_pn pkg o = originVisible (filterOrigin mb_pn pkg o)
+
+ -- Filters out origins which are not associated with the given package
+ -- qualifier. No-op if there is no package qualifier. Test if this
+ -- excluded all origins with 'originEmpty'.
+ filterOrigin :: Maybe FastString
+ -> PackageConfig
+ -> ModuleOrigin
+ -> ModuleOrigin
+ filterOrigin Nothing _ o = o
+ filterOrigin (Just pn) pkg o =
+ case o of
+ ModHidden -> if go pkg then ModHidden else mempty
+ ModOrigin { fromOrigPackage = e, fromExposedReexport = res,
+ fromHiddenReexport = rhs }
+ -> ModOrigin {
+ fromOrigPackage = if go pkg then e else Nothing
+ , fromExposedReexport = filter go res
+ , fromHiddenReexport = filter go rhs
+ , fromPackageFlag = False -- always excluded
+ }
+ where go pkg = pn == fsPackageName pkg
+
suggestions
| gopt Opt_HelpfulErrors dflags =
fuzzyLookup (moduleNameString m) all_mods
| otherwise = []
- all_mods :: [(String, Module)] -- All modules
- all_mods = [ (moduleNameString mod_nm, mkModule pkg_id mod_nm)
- | pkg_config <- eltsUFM (pkgIdMap pkg_state)
- , let pkg_id = packageConfigId pkg_config
- , mod_nm <- exposedModules pkg_config ]
+ all_mods :: [(String, ModuleSuggestion)] -- All modules
+ all_mods = sortBy (comparing fst) $
+ [ (moduleNameString m, suggestion)
+ | (m, e) <- Map.toList (moduleToPkgConfAll (pkgState dflags))
+ , suggestion <- map (getSuggestion m) (Map.toList e)
+ ]
+ getSuggestion name (mod, origin) =
+ (if originVisible origin then SuggestVisible else SuggestHidden)
+ name mod origin
+
+listVisibleModuleNames :: DynFlags -> [ModuleName]
+listVisibleModuleNames dflags =
+ Map.keys (moduleToPkgConfAll (pkgState dflags))
-- | Find all the 'PackageConfig' in both the preload packages from 'DynFlags' and corresponding to the list of
-- 'PackageConfig's
-getPreloadPackagesAnd :: DynFlags -> [PackageId] -> IO [PackageConfig]
+getPreloadPackagesAnd :: DynFlags -> [PackageKey] -> IO [PackageConfig]
getPreloadPackagesAnd dflags pkgids =
let
state = pkgState dflags
@@ -981,15 +1325,15 @@ getPreloadPackagesAnd dflags pkgids =
pairs = zip pkgids (repeat Nothing)
in do
all_pkgs <- throwErr dflags (foldM (add_package pkg_map ipid_map) preload pairs)
- return (map (getPackageDetails state) all_pkgs)
+ return (map (getPackageDetails dflags) all_pkgs)
-- Takes a list of packages, and returns the list with dependencies included,
-- in reverse dependency order (a package appears before those it depends on).
closeDeps :: DynFlags
-> PackageConfigMap
- -> Map InstalledPackageId PackageId
- -> [(PackageId, Maybe PackageId)]
- -> IO [PackageId]
+ -> Map InstalledPackageId PackageKey
+ -> [(PackageKey, Maybe PackageKey)]
+ -> IO [PackageKey]
closeDeps dflags pkg_map ipid_map ps
= throwErr dflags (closeDepsErr pkg_map ipid_map ps)
@@ -1000,22 +1344,22 @@ throwErr dflags m
Succeeded r -> return r
closeDepsErr :: PackageConfigMap
- -> Map InstalledPackageId PackageId
- -> [(PackageId,Maybe PackageId)]
- -> MaybeErr MsgDoc [PackageId]
+ -> Map InstalledPackageId PackageKey
+ -> [(PackageKey,Maybe PackageKey)]
+ -> MaybeErr MsgDoc [PackageKey]
closeDepsErr pkg_map ipid_map ps = foldM (add_package pkg_map ipid_map) [] ps
-- internal helper
add_package :: PackageConfigMap
- -> Map InstalledPackageId PackageId
- -> [PackageId]
- -> (PackageId,Maybe PackageId)
- -> MaybeErr MsgDoc [PackageId]
+ -> Map InstalledPackageId PackageKey
+ -> [PackageKey]
+ -> (PackageKey,Maybe PackageKey)
+ -> MaybeErr MsgDoc [PackageKey]
add_package pkg_db ipid_map ps (p, mb_parent)
| p `elem` ps = return ps -- Check if we've already added this package
| otherwise =
- case lookupPackage pkg_db p of
- Nothing -> Failed (missingPackageMsg (packageIdString p) <>
+ case lookupPackage' pkg_db p of
+ Nothing -> Failed (missingPackageMsg (packageKeyString p) <>
missingDependencyMsg mb_parent)
Just pkg -> do
-- Add the package's dependents also
@@ -1035,22 +1379,41 @@ missingPackageErr dflags p
missingPackageMsg :: String -> SDoc
missingPackageMsg p = ptext (sLit "unknown package:") <+> text p
-missingDependencyMsg :: Maybe PackageId -> SDoc
+missingDependencyMsg :: Maybe PackageKey -> SDoc
missingDependencyMsg Nothing = empty
missingDependencyMsg (Just parent)
- = space <> parens (ptext (sLit "dependency of") <+> ftext (packageIdFS parent))
+ = space <> parens (ptext (sLit "dependency of") <+> ftext (packageKeyFS parent))
-- -----------------------------------------------------------------------------
+packageKeyPackageIdString :: DynFlags -> PackageKey -> String
+packageKeyPackageIdString dflags pkg_key
+ | pkg_key == mainPackageKey = "main"
+ | otherwise = maybe "(unknown)"
+ (display . sourcePackageId)
+ (lookupPackage dflags pkg_key)
+
-- | Will the 'Name' come from a dynamically linked library?
-isDllName :: DynFlags -> PackageId -> Module -> Name -> Bool
+isDllName :: DynFlags -> PackageKey -> Module -> Name -> Bool
-- Despite the "dll", I think this function just means that
-- the synbol comes from another dynamically-linked package,
-- and applies on all platforms, not just Windows
-isDllName dflags this_pkg this_mod name
+isDllName dflags _this_pkg this_mod name
| gopt Opt_Static dflags = False
| Just mod <- nameModule_maybe name
- = if modulePackageId mod /= this_pkg
+ -- Issue #8696 - when GHC is dynamically linked, it will attempt
+ -- to load the dynamic dependencies of object files at compile
+ -- time for things like QuasiQuotes or
+ -- TemplateHaskell. Unfortunately, this interacts badly with
+ -- intra-package linking, because we don't generate indirect
+ -- (dynamic) symbols for intra-package calls. This means that if a
+ -- module with an intra-package call is loaded without its
+ -- dependencies, then GHC fails to link. This is the cause of #8696.
+ --
+ -- In the mean time, always force dynamic indirections to be
+ -- generated: when the module name isn't the module being
+ -- compiled, references are dynamic.
+ = if mod /= this_mod
then True
else case dllSplit dflags of
Nothing -> False
@@ -1066,12 +1429,39 @@ isDllName dflags this_pkg this_mod name
-- -----------------------------------------------------------------------------
-- Displaying packages
--- | Show package info on console, if verbosity is >= 3
+-- | Show (very verbose) package info on console, if verbosity is >= 5
dumpPackages :: DynFlags -> IO ()
-dumpPackages dflags
- = do let pkg_map = pkgIdMap (pkgState dflags)
- putMsg dflags $
- vcat (map (text . showInstalledPackageInfo
+dumpPackages = dumpPackages' showInstalledPackageInfo
+
+dumpPackages' :: (InstalledPackageInfo -> String) -> DynFlags -> IO ()
+dumpPackages' showIPI dflags
+ = do putMsg dflags $
+ vcat (map (text . showIPI
. packageConfigToInstalledPackageInfo)
- (eltsUFM pkg_map))
+ (listPackageConfigMap dflags))
+
+-- | Show simplified package info on console, if verbosity == 4.
+-- The idea is to only print package id, and any information that might
+-- be different from the package databases (exposure, trust)
+simpleDumpPackages :: DynFlags -> IO ()
+simpleDumpPackages = dumpPackages' showIPI
+ where showIPI ipi = let InstalledPackageId i = installedPackageId ipi
+ e = if exposed ipi then "E" else " "
+ t = if trusted ipi then "T" else " "
+ in e ++ t ++ " " ++ i
+
+-- | Show the mapping of modules to where they come from.
+pprModuleMap :: DynFlags -> SDoc
+pprModuleMap dflags =
+ vcat (map pprLine (Map.toList (moduleToPkgConfAll (pkgState dflags))))
+ where
+ pprLine (m,e) = ppr m $$ nest 50 (vcat (map (pprEntry m) (Map.toList e)))
+ pprEntry m (m',o)
+ | m == moduleName m' = ppr (modulePackageKey m') <+> parens (ppr o)
+ | otherwise = ppr m' <+> parens (ppr o)
+
+fsPackageName :: PackageConfig -> FastString
+fsPackageName pkg = case packageName (sourcePackageId pkg) of
+ PackageName n -> mkFastString n
+
\end{code}
diff --git a/compiler/main/Packages.lhs-boot b/compiler/main/Packages.lhs-boot
index 3a1712e2da1c..3fd0fd542235 100644
--- a/compiler/main/Packages.lhs-boot
+++ b/compiler/main/Packages.lhs-boot
@@ -1,4 +1,8 @@
\begin{code}
module Packages where
+-- Well, this is kind of stupid...
+import {-# SOURCE #-} Module (PackageKey)
+import {-# SOURCE #-} DynFlags (DynFlags)
data PackageState
+packageKeyPackageIdString :: DynFlags -> PackageKey -> String
\end{code}
diff --git a/compiler/main/PlatformConstants.hs b/compiler/main/PlatformConstants.hs
index 03e146ca7c87..b2ca32be6819 100644
--- a/compiler/main/PlatformConstants.hs
+++ b/compiler/main/PlatformConstants.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-------------------------------------------------------------------------------
--
-- | Platform constants
diff --git a/compiler/main/PprTyThing.hs b/compiler/main/PprTyThing.hs
index 27e739009dca..eed4671b67ac 100644
--- a/compiler/main/PprTyThing.hs
+++ b/compiler/main/PprTyThing.hs
@@ -6,67 +6,90 @@
--
-----------------------------------------------------------------------------
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
+{-# LANGUAGE CPP #-}
module PprTyThing (
- pprTyThing,
- pprTyThingInContext,
- pprTyThingLoc,
- pprTyThingInContextLoc,
- pprTyThingHdr,
- pprTypeForUser
+ pprTyThing,
+ pprTyThingInContext,
+ pprTyThingLoc,
+ pprTyThingInContextLoc,
+ pprTyThingHdr,
+ pprTypeForUser,
+ pprFamInst
) where
+#include "HsVersions.h"
+
import TypeRep ( TyThing(..) )
-import ConLike
-import DataCon
-import PatSyn
-import Id
-import TyCon
-import Class
-import Coercion( pprCoAxiom, pprCoAxBranch )
-import CoAxiom( CoAxiom(..), brListMap )
+import CoAxiom ( coAxiomTyCon )
import HscTypes( tyThingParent_maybe )
-import HsBinds( pprPatSynSig )
-import Type( tidyTopType, tidyOpenType, splitForAllTys, funResultTy )
-import Kind( synTyConResKind )
-import TypeRep( pprTvBndrs, pprForAll, suppressKinds )
-import TysPrim( alphaTyVars )
+import MkIface ( tyThingToIfaceDecl )
+import Type ( tidyOpenType )
+import IfaceSyn ( pprIfaceDecl, ShowSub(..), ShowHowMuch(..) )
+import FamInstEnv( FamInst( .. ), FamFlavor(..) )
import TcType
import Name
import VarEnv( emptyTidyEnv )
-import StaticFlags( opt_PprStyle_Debug )
-import DynFlags
import Outputable
import FastString
-import Data.Maybe
-- -----------------------------------------------------------------------------
-- Pretty-printing entities that we get from the GHC API
--- This should be a good source of sample code for using the GHC API to
--- inspect source code entities.
-
-type ShowSub = [Name]
--- [] <=> print all sub-components of the current thing
--- (n:ns) <=> print sub-component 'n' with ShowSub=ns
--- elide other sub-components to "..."
-showAll :: ShowSub
-showAll = []
+{- Note [Pretty-printing TyThings]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We pretty-print a TyThing by converting it to an IfaceDecl,
+and pretty-printing that (see ppr_ty_thing below).
+Here is why:
+
+* When pretty-printing (a type, say), the idiomatic solution is not to
+ "rename type variables on the fly", but rather to "tidy" the type
+ (which gives each variable a distinct print-name), and then
+ pretty-print it (without renaming). Separate the two
+ concerns. Functions like tidyType do this.
+
+* Alas, for type constructors, TyCon, tidying does not work well,
+ because a TyCon includes DataCons which include Types, which mention
+ TyCons. And tidying can't tidy a mutually recursive data structure
+ graph, only trees.
+
+* One alternative would be to ensure that TyCons get type variables
+ with distinct print-names. That's ok for type variables but less
+ easy for kind variables. Processing data type declarations is
+ already so complicated that I don't think it's sensible to add the
+ extra requirement that it generates only "pretty" types and kinds.
+
+* One place the non-pretty names can show up is in GHCi. But another
+ is in interface files. Look at MkIface.tyThingToIfaceDecl which
+ converts a TyThing (i.e. TyCon, Class etc) to an IfaceDecl. And it
+ already does tidying as part of that conversion! Why? Because
+ interface files contains fast-strings, not uniques, so the names
+ must at least be distinct.
+
+So if we convert to IfaceDecl, we get a nice tidy IfaceDecl, and can
+print that. Of course, that means that pretty-printing IfaceDecls
+must be careful to display nice user-friendly results, but that's ok.
+
+See #7730, #8776 for details -}
+
+--------------------
+-- | Pretty-prints a 'FamInst' (type/data family instance) with its defining location.
+pprFamInst :: FamInst -> SDoc
+-- * For data instances we go via pprTyThing of the represntational TyCon,
+-- because there is already much cleverness associated with printing
+-- data type declarations that I don't want to duplicate
+-- * For type instances we print directly here; there is no TyCon
+-- to give to pprTyThing
+--
+-- FamInstEnv.pprFamInst does a more quick-and-dirty job for internal purposes
-showSub :: NamedThing n => ShowSub -> n -> Bool
-showSub [] _ = True
-showSub (n:_) thing = n == getName thing
+pprFamInst (FamInst { fi_flavor = DataFamilyInst rep_tc })
+ = pprTyThingInContextLoc (ATyCon rep_tc)
-showSub_maybe :: NamedThing n => ShowSub -> n -> Maybe ShowSub
-showSub_maybe [] _ = Just []
-showSub_maybe (n:ns) thing = if n == getName thing then Just ns
- else Nothing
+pprFamInst (FamInst { fi_flavor = SynFamilyInst, fi_axiom = axiom
+ , fi_tys = lhs_tys, fi_rhs = rhs })
+ = showWithLoc (pprDefinedAt (getName axiom)) $
+ hang (ptext (sLit "type instance") <+> pprTypeApp (coAxiomTyCon axiom) lhs_tys)
+ 2 (equals <+> ppr rhs)
----------------------------
-- | Pretty-prints a 'TyThing' with its defining location.
@@ -76,7 +99,13 @@ pprTyThingLoc tyThing
-- | Pretty-prints a 'TyThing'.
pprTyThing :: TyThing -> SDoc
-pprTyThing thing = ppr_ty_thing showAll thing
+pprTyThing = ppr_ty_thing False []
+
+-- | Pretty-prints the 'TyThing' header. For functions and data constructors
+-- the function is equivalent to 'pprTyThing' but for type constructors
+-- and classes it prints only the header part of the declaration.
+pprTyThingHdr :: TyThing -> SDoc
+pprTyThingHdr = ppr_ty_thing True []
-- | Pretty-prints a 'TyThing' in context: that is, if the entity
-- is a data constructor, record selector, or class method, then
@@ -87,8 +116,8 @@ pprTyThingInContext thing
= go [] thing
where
go ss thing = case tyThingParent_maybe thing of
- Just parent -> go (getName thing : ss) parent
- Nothing -> ppr_ty_thing ss thing
+ Just parent -> go (getOccName thing : ss) parent
+ Nothing -> ppr_ty_thing False ss thing
-- | Like 'pprTyThingInContext', but adds the defining location.
pprTyThingInContextLoc :: TyThing -> SDoc
@@ -96,280 +125,49 @@ pprTyThingInContextLoc tyThing
= showWithLoc (pprDefinedAt (getName tyThing))
(pprTyThingInContext tyThing)
--- | Pretty-prints the 'TyThing' header. For functions and data constructors
--- the function is equivalent to 'pprTyThing' but for type constructors
--- and classes it prints only the header part of the declaration.
-pprTyThingHdr :: TyThing -> SDoc
-pprTyThingHdr (AnId id) = pprId id
-pprTyThingHdr (AConLike conLike) = case conLike of
- RealDataCon dataCon -> pprDataConSig dataCon
- PatSynCon patSyn -> pprPatSyn patSyn
-pprTyThingHdr (ATyCon tyCon) = pprTyConHdr tyCon
-pprTyThingHdr (ACoAxiom ax) = pprCoAxiom ax
-
------------------------
-ppr_ty_thing :: ShowSub -> TyThing -> SDoc
-ppr_ty_thing _ (AnId id) = pprId id
-ppr_ty_thing _ (AConLike conLike) = case conLike of
- RealDataCon dataCon -> pprDataConSig dataCon
- PatSynCon patSyn -> pprPatSyn patSyn
-ppr_ty_thing ss (ATyCon tyCon) = pprTyCon ss tyCon
-ppr_ty_thing _ (ACoAxiom ax) = pprCoAxiom ax
-
-pprTyConHdr :: TyCon -> SDoc
-pprTyConHdr tyCon
- | Just (fam_tc, tys) <- tyConFamInst_maybe tyCon
- = ptext keyword <+> ptext (sLit "instance") <+> pprTypeApp fam_tc tys
- | Just cls <- tyConClass_maybe tyCon
- = pprClassHdr cls
- | otherwise
- = sdocWithDynFlags $ \dflags ->
- ptext keyword <+> opt_family <+> opt_stupid <+> ppr_bndr tyCon
- <+> pprTvBndrs (suppressKinds dflags (tyConKind tyCon) vars)
- where
- vars | isPrimTyCon tyCon ||
- isFunTyCon tyCon = take (tyConArity tyCon) alphaTyVars
- | otherwise = tyConTyVars tyCon
-
- keyword | isSynTyCon tyCon = sLit "type"
- | isNewTyCon tyCon = sLit "newtype"
- | otherwise = sLit "data"
-
- opt_family
- | isFamilyTyCon tyCon = ptext (sLit "family")
- | otherwise = empty
-
- opt_stupid -- The "stupid theta" part of the declaration
- | isAlgTyCon tyCon = pprThetaArrowTy (tyConStupidTheta tyCon)
- | otherwise = empty -- Returns 'empty' if null theta
-
-pprDataConSig :: DataCon -> SDoc
-pprDataConSig dataCon
- = ppr_bndr dataCon <+> dcolon <+> pprTypeForUser (dataConUserType dataCon)
-
-pprClassHdr :: Class -> SDoc
-pprClassHdr cls
- = sdocWithDynFlags $ \dflags ->
- ptext (sLit "class") <+>
- sep [ pprThetaArrowTy (classSCTheta cls)
- , ppr_bndr cls
- <+> pprTvBndrs (suppressKinds dflags (tyConKind (classTyCon cls)) tvs)
- , pprFundeps funDeps ]
+ppr_ty_thing :: Bool -> [OccName] -> TyThing -> SDoc
+-- We pretty-print 'TyThing' via 'IfaceDecl'
+-- See Note [Pretty-printing TyThings]
+ppr_ty_thing hdr_only path ty_thing
+ = pprIfaceDecl ss (tyThingToIfaceDecl ty_thing)
where
- (tvs, funDeps) = classTvsFds cls
-
-pprId :: Var -> SDoc
-pprId ident
- = hang (ppr_bndr ident <+> dcolon)
- 2 (pprTypeForUser (idType ident))
-
-pprPatSyn :: PatSyn -> SDoc
-pprPatSyn patSyn
- = pprPatSynSig ident is_bidir args (pprTypeForUser rhs_ty) prov req
- where
- ident = patSynId patSyn
- is_bidir = isJust $ patSynWrapper patSyn
-
- args = fmap pprParendType (patSynTyDetails patSyn)
- prov = pprThetaOpt prov_theta
- req = pprThetaOpt req_theta
-
- pprThetaOpt [] = Nothing
- pprThetaOpt theta = Just $ pprTheta theta
-
- (_univ_tvs, _ex_tvs, (prov_theta, req_theta)) = patSynSig patSyn
- rhs_ty = patSynType patSyn
+ ss = ShowSub { ss_how_much = how_much, ss_ppr_bndr = ppr_bndr }
+ how_much | hdr_only = ShowHeader
+ | otherwise = ShowSome path
+ name = getName ty_thing
+ ppr_bndr :: OccName -> SDoc
+ ppr_bndr | isBuiltInSyntax name
+ = ppr
+ | otherwise
+ = case nameModule_maybe name of
+ Just mod -> \ occ -> getPprStyle $ \sty ->
+ pprModulePrefix sty mod occ <> ppr occ
+ Nothing -> WARN( True, ppr name ) ppr
+ -- Nothing is unexpected here; TyThings have External names
pprTypeForUser :: Type -> SDoc
-- We do two things here.
-- a) We tidy the type, regardless
--- b) If Opt_PrintExplicitForAlls is True, we discard the foralls
--- but we do so `deeply'
+-- b) Swizzle the foralls to the top, so that without
+-- -fprint-explicit-foralls we'll suppress all the foralls
-- Prime example: a class op might have type
--- forall a. C a => forall b. Ord b => stuff
+-- forall a. C a => forall b. Ord b => stuff
-- Then we want to display
--- (C a, Ord b) => stuff
+-- (C a, Ord b) => stuff
pprTypeForUser ty
- = sdocWithDynFlags $ \ dflags ->
- if gopt Opt_PrintExplicitForalls dflags
- then ppr tidy_ty
- else ppr (mkPhiTy ctxt ty')
+ = pprSigmaType (mkSigmaTy tvs ctxt tau)
where
- (_, ctxt, ty') = tcSplitSigmaTy tidy_ty
- (_, tidy_ty) = tidyOpenType emptyTidyEnv ty
+ (tvs, ctxt, tau) = tcSplitSigmaTy tidy_ty
+ (_, tidy_ty) = tidyOpenType emptyTidyEnv ty
-- Often the types/kinds we print in ghci are fully generalised
-- and have no free variables, but it turns out that we sometimes
-- print un-generalised kinds (eg when doing :k T), so it's
-- better to use tidyOpenType here
-pprTyCon :: ShowSub -> TyCon -> SDoc
-pprTyCon ss tyCon
- | Just syn_rhs <- synTyConRhs_maybe tyCon
- = case syn_rhs of
- OpenSynFamilyTyCon -> pp_tc_with_kind
- BuiltInSynFamTyCon {} -> pp_tc_with_kind
-
- ClosedSynFamilyTyCon (CoAxiom { co_ax_branches = branches })
- -> hang closed_family_header
- 2 (vcat (brListMap (pprCoAxBranch tyCon) branches))
-
- AbstractClosedSynFamilyTyCon
- -> closed_family_header <+> ptext (sLit "..")
-
- SynonymTyCon rhs_ty
- -> hang (pprTyConHdr tyCon <+> equals)
- 2 (ppr rhs_ty) -- Don't suppress foralls on RHS type!
-
- -- e.g. type T = forall a. a->a
- | Just cls <- tyConClass_maybe tyCon
- = (pp_roles (== Nominal)) $$ pprClass ss cls
-
- | otherwise
- = (pp_roles (== Representational)) $$ pprAlgTyCon ss tyCon
-
- where
- -- if, for each role, suppress_if role is True, then suppress the role
- -- output
- pp_roles :: (Role -> Bool) -> SDoc
- pp_roles suppress_if
- = sdocWithDynFlags $ \dflags ->
- let roles = suppressKinds dflags (tyConKind tyCon) (tyConRoles tyCon)
- in ppUnless (isFamInstTyCon tyCon || all suppress_if roles) $
- -- Don't display roles for data family instances (yet)
- -- See discussion on Trac #8672.
- ptext (sLit "type role") <+> ppr tyCon <+> hsep (map ppr roles)
-
- pp_tc_with_kind = vcat [ pp_roles (const True)
- , pprTyConHdr tyCon <+> dcolon
- <+> pprTypeForUser (synTyConResKind tyCon) ]
- closed_family_header
- = pp_tc_with_kind <+> ptext (sLit "where")
-
-pprAlgTyCon :: ShowSub -> TyCon -> SDoc
-pprAlgTyCon ss tyCon
- | gadt = pprTyConHdr tyCon <+> ptext (sLit "where") $$
- nest 2 (vcat (ppr_trim (map show_con datacons)))
- | otherwise = hang (pprTyConHdr tyCon)
- 2 (add_bars (ppr_trim (map show_con datacons)))
- where
- datacons = tyConDataCons tyCon
- gadt = any (not . isVanillaDataCon) datacons
-
- ok_con dc = showSub ss dc || any (showSub ss) (dataConFieldLabels dc)
- show_con dc
- | ok_con dc = Just (pprDataConDecl ss gadt dc)
- | otherwise = Nothing
-
-pprDataConDecl :: ShowSub -> Bool -> DataCon -> SDoc
-pprDataConDecl ss gadt_style dataCon
- | not gadt_style = ppr_fields tys_w_strs
- | otherwise = ppr_bndr dataCon <+> dcolon <+>
- sep [ pp_foralls, pprThetaArrowTy theta, pp_tau ]
- -- Printing out the dataCon as a type signature, in GADT style
- where
- (forall_tvs, theta, tau) = tcSplitSigmaTy (dataConUserType dataCon)
- (arg_tys, res_ty) = tcSplitFunTys tau
- labels = dataConFieldLabels dataCon
- stricts = dataConStrictMarks dataCon
- tys_w_strs = zip (map user_ify stricts) arg_tys
- pp_foralls = sdocWithDynFlags $ \dflags ->
- ppWhen (gopt Opt_PrintExplicitForalls dflags)
- (pprForAll forall_tvs)
-
- pp_tau = foldr add (ppr res_ty) tys_w_strs
- add str_ty pp_ty = pprParendBangTy str_ty <+> arrow <+> pp_ty
-
- pprParendBangTy (bang,ty) = ppr bang <> pprParendType ty
- pprBangTy (bang,ty) = ppr bang <> ppr ty
-
- -- See Note [Printing bangs on data constructors]
- user_ify :: HsBang -> HsBang
- user_ify bang | opt_PprStyle_Debug = bang
- user_ify HsStrict = HsUserBang Nothing True
- user_ify (HsUnpack {}) = HsUserBang (Just True) True
- user_ify bang = bang
-
- maybe_show_label (lbl,bty)
- | showSub ss lbl = Just (ppr_bndr lbl <+> dcolon <+> pprBangTy bty)
- | otherwise = Nothing
-
- ppr_fields [ty1, ty2]
- | dataConIsInfix dataCon && null labels
- = sep [pprParendBangTy ty1, pprInfixName dataCon, pprParendBangTy ty2]
- ppr_fields fields
- | null labels
- = ppr_bndr dataCon <+> sep (map pprParendBangTy fields)
- | otherwise
- = ppr_bndr dataCon
- <+> (braces $ sep $ punctuate comma $ ppr_trim $
- map maybe_show_label (zip labels fields))
-
-pprClass :: ShowSub -> Class -> SDoc
-pprClass ss cls
- | null methods && null assoc_ts
- = pprClassHdr cls
- | otherwise
- = vcat [ pprClassHdr cls <+> ptext (sLit "where")
- , nest 2 (vcat $ ppr_trim $
- map show_at assoc_ts ++ map show_meth methods)]
- where
- methods = classMethods cls
- assoc_ts = classATs cls
- show_meth id | showSub ss id = Just (pprClassMethod id)
- | otherwise = Nothing
- show_at tc = case showSub_maybe ss tc of
- Just ss' -> Just (pprTyCon ss' tc)
- Nothing -> Nothing
-
-pprClassMethod :: Id -> SDoc
-pprClassMethod id
- = hang (ppr_bndr id <+> dcolon) 2 (pprTypeForUser op_ty)
- where
- -- Here's the magic incantation to strip off the dictionary
- -- from the class op type. Stolen from IfaceSyn.tyThingToIfaceDecl.
- --
- -- It's important to tidy it *before* splitting it up, so that if
- -- we have class C a b where
- -- op :: forall a. a -> b
- -- then the inner forall on op gets renamed to a1, and we print
- -- (when dropping foralls)
- -- class C a b where
- -- op :: a1 -> b
-
- tidy_sel_ty = tidyTopType (idType id)
- (_sel_tyvars, rho_ty) = splitForAllTys tidy_sel_ty
- op_ty = funResultTy rho_ty
-
-ppr_trim :: [Maybe SDoc] -> [SDoc]
--- Collapse a group of Nothings to a single "..."
-ppr_trim xs
- = snd (foldr go (False, []) xs)
- where
- go (Just doc) (_, so_far) = (False, doc : so_far)
- go Nothing (True, so_far) = (True, so_far)
- go Nothing (False, so_far) = (True, ptext (sLit "...") : so_far)
-
-add_bars :: [SDoc] -> SDoc
-add_bars [] = empty
-add_bars [c] = equals <+> c
-add_bars (c:cs) = sep ((equals <+> c) : map (char '|' <+>) cs)
-
--- Wrap operators in ()
-ppr_bndr :: NamedThing a => a -> SDoc
-ppr_bndr a = parenSymOcc (getOccName a) (ppr (getName a))
-
showWithLoc :: SDoc -> SDoc -> SDoc
showWithLoc loc doc
= hang doc 2 (char '\t' <> comment <+> loc)
- -- The tab tries to make them line up a bit
+ -- The tab tries to make them line up a bit
where
comment = ptext (sLit "--")
-
-{-
-Note [Printing bangs on data constructors]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-For imported data constructors the dataConStrictMarks are the
-representation choices (see Note [Bangs on data constructor arguments]
-in DataCon.lhs). So we have to fiddle a little bit here to turn them
-back into user-printable form.
--}
diff --git a/compiler/main/StaticFlags.hs b/compiler/main/StaticFlags.hs
index 01dc3b7275ea..eb7ede00c6c2 100644
--- a/compiler/main/StaticFlags.hs
+++ b/compiler/main/StaticFlags.hs
@@ -1,4 +1,5 @@
-{-# OPTIONS -fno-cse #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-cse #-}
-- -fno-cse is needed for GLOBAL_VAR's to behave properly
-----------------------------------------------------------------------------
diff --git a/compiler/main/SysTools.lhs b/compiler/main/SysTools.lhs
index 2150c6d59482..72fa19b3cca4 100644
--- a/compiler/main/SysTools.lhs
+++ b/compiler/main/SysTools.lhs
@@ -7,6 +7,8 @@
-----------------------------------------------------------------------------
\begin{code}
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
+
module SysTools (
-- Initialisation
initSysTools,
@@ -233,6 +235,8 @@ initSysTools mbMinusB
-- to make that possible, so for now you can't.
gcc_prog <- getSetting "C compiler command"
gcc_args_str <- getSetting "C compiler flags"
+ cpp_prog <- getSetting "Haskell CPP command"
+ cpp_args_str <- getSetting "Haskell CPP flags"
let unreg_gcc_args = if targetUnregisterised
then ["-DNO_REGS", "-DUSE_MINIINTERPRETER"]
else []
@@ -241,6 +245,7 @@ initSysTools mbMinusB
| mkTablesNextToCode targetUnregisterised
= ["-DTABLES_NEXT_TO_CODE"]
| otherwise = []
+ cpp_args= map Option (words cpp_args_str)
gcc_args = map Option (words gcc_args_str
++ unreg_gcc_args
++ tntc_gcc_args)
@@ -283,10 +288,7 @@ initSysTools mbMinusB
-- cpp is derived from gcc on all platforms
-- HACK, see setPgmP below. We keep 'words' here to remember to fix
-- Config.hs one day.
- let cpp_prog = gcc_prog
- cpp_args = Option "-E"
- : map Option (words cRAWCPP_FLAGS)
- ++ gcc_args
+
-- Other things being equal, as and ld are simply gcc
gcc_link_args_str <- getSetting "C compiler link flags"
@@ -602,6 +604,42 @@ figureLlvmVersion dflags = do
return Nothing)
return ver
+{- Note [Windows stack usage]
+
+See: Trac #8870 (and #8834 for related info)
+
+On Windows, occasionally we need to grow the stack. In order to do
+this, we would normally just bump the stack pointer - but there's a
+catch on Windows.
+
+If the stack pointer is bumped by more than a single page, then the
+pages between the initial pointer and the resulting location must be
+properly committed by the Windows virtual memory subsystem. This is
+only needed in the event we bump by more than one page (i.e 4097 bytes
+or more).
+
+Windows compilers solve this by emitting a call to a special function
+called _chkstk, which does this committing of the pages for you.
+
+The reason this was causing a segfault was because due to the fact the
+new code generator tends to generate larger functions, we needed more
+stack space in GHC itself. In the x86 codegen, we needed approximately
+~12kb of stack space in one go, which caused the process to segfault,
+as the intervening pages were not committed.
+
+In the future, we should do the same thing, to make the problem
+completely go away. In the mean time, we're using a workaround: we
+instruct the linker to specify the generated PE as having an initial
+reserved stack size of 8mb, as well as a initial *committed* stack
+size of 8mb. The default committed size was previously only 4k.
+
+Theoretically it's possible to still hit this problem if you request a
+stack bump of more than 8mb in one go. But the amount of code
+necessary is quite large, and 8mb "should be more than enough for
+anyone" right now (he said, before millions of lines of code cried out
+in terror).
+
+-}
{- Note [Run-time linker info]
@@ -691,15 +729,20 @@ getLinkerInfo' dflags = do
-- that doesn't support --version. We can just assume that's
-- what we're using.
return $ DarwinLD []
- OSiOS ->
+ OSiOS ->
-- Ditto for iOS
return $ DarwinLD []
OSMinGW32 ->
-- GHC doesn't support anything but GNU ld on Windows anyway.
-- Process creation is also fairly expensive on win32, so
-- we short-circuit here.
- return $ GnuLD $ map Option ["-Wl,--hash-size=31",
- "-Wl,--reduce-memory-overheads"]
+ return $ GnuLD $ map Option
+ [ -- Reduce ld memory usage
+ "-Wl,--hash-size=31"
+ , "-Wl,--reduce-memory-overheads"
+ -- Increase default stack, see
+ -- Note [Windows stack usage]
+ , "-Xlinker", "--stack=0x800000,0x800000" ]
_ -> do
-- In practice, we use the compiler as the linker here. Pass
-- -Wl,--version to get linker version info.
@@ -745,12 +788,15 @@ getCompilerInfo' dflags = do
-- Regular clang
| any ("clang version" `isPrefixOf`) stde =
return Clang
+ -- XCode 5.1 clang
+ | any ("Apple LLVM version 5.1" `isPrefixOf`) stde =
+ return AppleClang51
-- XCode 5 clang
| any ("Apple LLVM version" `isPrefixOf`) stde =
- return Clang
+ return AppleClang
-- XCode 4.1 clang
| any ("Apple clang version" `isPrefixOf`) stde =
- return Clang
+ return AppleClang
-- Unknown linker.
| otherwise = fail "invalid -v output, or compiler is unsupported"
@@ -763,10 +809,10 @@ getCompilerInfo' dflags = do
)
(\err -> do
debugTraceMsg dflags 2
- (text "Error (figuring out compiler information):" <+>
+ (text "Error (figuring out C compiler information):" <+>
text (show err))
errorMsg dflags $ hang (text "Warning:") 9 $
- text "Couldn't figure out linker information!" $$
+ text "Couldn't figure out C compiler information!" $$
text "Make sure you're using GNU gcc, or clang"
return UnknownCC)
return info
@@ -779,7 +825,57 @@ runLink dflags args = do
args1 = map Option (getOpts dflags opt_l)
args2 = args0 ++ args1 ++ args ++ linkargs
mb_env <- getGccEnv args2
- runSomethingFiltered dflags id "Linker" p args2 mb_env
+ runSomethingFiltered dflags ld_filter "Linker" p args2 mb_env
+ where
+ ld_filter = case (platformOS (targetPlatform dflags)) of
+ OSSolaris2 -> sunos_ld_filter
+ _ -> id
+{-
+ SunOS/Solaris ld emits harmless warning messages about unresolved
+ symbols in case of compiling into shared library when we do not
+ link against all the required libs. That is the case of GHC which
+ does not link against RTS library explicitly in order to be able to
+ choose the library later based on binary application linking
+ parameters. The warnings look like:
+
+Undefined first referenced
+ symbol in file
+stg_ap_n_fast ./T2386_Lib.o
+stg_upd_frame_info ./T2386_Lib.o
+templatezmhaskell_LanguageziHaskellziTHziLib_litE_closure ./T2386_Lib.o
+templatezmhaskell_LanguageziHaskellziTHziLib_appE_closure ./T2386_Lib.o
+templatezmhaskell_LanguageziHaskellziTHziLib_conE_closure ./T2386_Lib.o
+templatezmhaskell_LanguageziHaskellziTHziSyntax_mkNameGzud_closure ./T2386_Lib.o
+newCAF ./T2386_Lib.o
+stg_bh_upd_frame_info ./T2386_Lib.o
+stg_ap_ppp_fast ./T2386_Lib.o
+templatezmhaskell_LanguageziHaskellziTHziLib_stringL_closure ./T2386_Lib.o
+stg_ap_p_fast ./T2386_Lib.o
+stg_ap_pp_fast ./T2386_Lib.o
+ld: warning: symbol referencing errors
+
+ this is actually coming from T2386 testcase. The emitting of those
+ warnings is also a reason why so many TH testcases fail on Solaris.
+
+ Following filter code is SunOS/Solaris linker specific and should
+ filter out only linker warnings. Please note that the logic is a
+ little bit more complex due to the simple reason that we need to preserve
+ any other linker emitted messages. If there are any. Simply speaking
+ if we see "Undefined" and later "ld: warning:..." then we omit all
+ text between (including) the marks. Otherwise we copy the whole output.
+-}
+ sunos_ld_filter :: String -> String
+ sunos_ld_filter = unlines . sunos_ld_filter' . lines
+ sunos_ld_filter' x = if (undefined_found x && ld_warning_found x)
+ then (ld_prefix x) ++ (ld_postfix x)
+ else x
+ breakStartsWith x y = break (isPrefixOf x) y
+ ld_prefix = fst . breakStartsWith "Undefined"
+ undefined_found = not . null . snd . breakStartsWith "Undefined"
+ ld_warn_break = breakStartsWith "ld: warning: symbol referencing errors"
+ ld_postfix = tail . snd . ld_warn_break
+ ld_warning_found = not . null . snd . ld_warn_break
+
runLibtool :: DynFlags -> [Option] -> IO ()
runLibtool dflags args = do
@@ -1270,7 +1366,7 @@ linesPlatform xs =
#endif
-linkDynLib :: DynFlags -> [String] -> [PackageId] -> IO ()
+linkDynLib :: DynFlags -> [String] -> [PackageKey] -> IO ()
linkDynLib dflags0 o_files dep_packages
= do
let -- This is a rather ugly hack to fix dynamically linked
@@ -1293,7 +1389,8 @@ linkDynLib dflags0 o_files dep_packages
let pkg_lib_paths = collectLibraryPaths pkgs
let pkg_lib_path_opts = concatMap get_pkg_lib_path_opts pkg_lib_paths
get_pkg_lib_path_opts l
- | osElfTarget (platformOS (targetPlatform dflags)) &&
+ | ( osElfTarget (platformOS (targetPlatform dflags)) ||
+ osMachOTarget (platformOS (targetPlatform dflags)) ) &&
dynLibLoader dflags == SystemDependent &&
not (gopt Opt_Static dflags)
= ["-L" ++ l, "-Wl,-rpath", "-Wl," ++ l]
@@ -1315,7 +1412,7 @@ linkDynLib dflags0 o_files dep_packages
OSMinGW32 ->
pkgs
_ ->
- filter ((/= rtsPackageId) . packageConfigId) pkgs
+ filter ((/= rtsPackageKey) . packageConfigId) pkgs
let pkg_link_opts = let (package_hs_libs, extra_libs, other_flags) = collectLinkOpts dflags pkgs_no_rts
in package_hs_libs ++ extra_libs ++ other_flags
@@ -1390,9 +1487,7 @@ linkDynLib dflags0 o_files dep_packages
instName <- case dylibInstallName dflags of
Just n -> return n
- Nothing -> do
- pwd <- getCurrentDirectory
- return $ pwd `combine` output_fn
+ Nothing -> return $ "@rpath" `combine` (takeFileName output_fn)
runLink dflags (
map Option verbFlags
++ [ Option "-dynamiclib"
@@ -1419,7 +1514,7 @@ linkDynLib dflags0 o_files dep_packages
-------------------------------------------------------------------
let output_fn = case o_file of { Just s -> s; Nothing -> "a.out"; }
- let buildingRts = thisPackage dflags == rtsPackageId
+ let buildingRts = thisPackage dflags == rtsPackageKey
let bsymbolicFlag = if buildingRts
then -- -Bsymbolic breaks the way we implement
-- hooks in the RTS
diff --git a/compiler/main/TidyPgm.lhs b/compiler/main/TidyPgm.lhs
index 7ab6d569bcd0..d5ae7110fe8d 100644
--- a/compiler/main/TidyPgm.lhs
+++ b/compiler/main/TidyPgm.lhs
@@ -4,6 +4,8 @@
\section{Tidying up Core}
\begin{code}
+{-# LANGUAGE BangPatterns, CPP #-}
+
module TidyPgm (
mkBootModDetailsTc, tidyProgram, globaliseAndTidyId
) where
@@ -21,11 +23,14 @@ import CorePrep
import CoreUtils
import Literal
import Rules
+import PatSyn
+import ConLike
import CoreArity ( exprArity, exprBotStrictness_maybe )
import VarEnv
import VarSet
import Var
import Id
+import MkId ( mkDictSelRhs )
import IdInfo
import InstEnv
import FamInstEnv
@@ -56,9 +61,15 @@ import FastString
import qualified ErrUtils as Err
import Control.Monad
+import qualified Data.ByteString.Char8 as BS
import Data.Function
import Data.List ( sortBy )
-import Data.IORef ( atomicModifyIORef )
+import qualified Data.Map as Map
+import Data.IORef ( atomicModifyIORef, readIORef )
+import Data.Set ( elems )
+import System.IO
+import System.FilePath
+import System.Directory
\end{code}
@@ -129,18 +140,20 @@ mkBootModDetailsTc hsc_env
TcGblEnv{ tcg_exports = exports,
tcg_type_env = type_env, -- just for the Ids
tcg_tcs = tcs,
+ tcg_patsyns = pat_syns,
tcg_insts = insts,
tcg_fam_insts = fam_insts
}
= do { let dflags = hsc_dflags hsc_env
; showPass dflags CoreTidy
- ; let { insts' = map (tidyClsInstDFun globaliseAndTidyId) insts
- ; dfun_ids = map instanceDFunId insts'
+ ; let { insts' = map (tidyClsInstDFun globaliseAndTidyId) insts
+ ; pat_syns' = map (tidyPatSynIds globaliseAndTidyId) pat_syns
+ ; dfun_ids = map instanceDFunId insts'
+ ; pat_syn_ids = concatMap patSynIds pat_syns'
; type_env1 = mkBootTypeEnv (availsToNameSet exports)
- (typeEnvIds type_env) tcs fam_insts
- ; type_env2 = extendTypeEnvWithPatSyns type_env1 (typeEnvPatSyns type_env)
- ; type_env' = extendTypeEnvWithIds type_env2 dfun_ids
+ (typeEnvIds type_env) tcs fam_insts
+ ; type_env' = extendTypeEnvWithIds type_env1 (pat_syn_ids ++ dfun_ids)
}
; return (ModDetails { md_types = type_env'
, md_insts = insts'
@@ -333,19 +346,13 @@ tidyProgram hsc_env (ModGuts { mg_module = mod
; let { final_ids = [ id | id <- bindersOfBinds tidy_binds,
isExternalName (idName id)]
- ; final_patsyns = filter (isExternalName . getName) patsyns
-
- ; type_env' = extendTypeEnvWithIds type_env final_ids
- ; type_env'' = extendTypeEnvWithPatSyns type_env' final_patsyns
-
- ; tidy_type_env = tidyTypeEnv omit_prags type_env''
+ ; type_env1 = extendTypeEnvWithIds type_env final_ids
- ; tidy_insts = map (tidyClsInstDFun (lookup_dfun tidy_type_env)) insts
- -- A DFunId will have a binding in tidy_binds, and so
- -- will now be in final_env, replete with IdInfo
- -- Its name will be unchanged since it was born, but
- -- we want Global, IdInfo-rich (or not) DFunId in the
- -- tidy_insts
+ ; tidy_insts = map (tidyClsInstDFun (lookup_aux_id tidy_type_env)) insts
+ -- A DFunId will have a binding in tidy_binds, and so will now be in
+ -- tidy_type_env, replete with IdInfo. Its name will be unchanged since
+ -- it was born, but we want Global, IdInfo-rich (or not) DFunId in the
+ -- tidy_insts. Similarly the Ids inside a PatSyn.
; tidy_rules = tidyRules tidy_env ext_rules
-- You might worry that the tidy_env contains IdInfo-rich stuff
@@ -354,6 +361,16 @@ tidyProgram hsc_env (ModGuts { mg_module = mod
; tidy_vect_info = tidyVectInfo tidy_env vect_info
+ -- Tidy the Ids inside each PatSyn, very similarly to DFunIds
+ -- and then override the PatSyns in the type_env with the new tidy ones
+ -- This is really the only reason we keep mg_patsyns at all; otherwise
+ -- they could just stay in type_env
+ ; tidy_patsyns = map (tidyPatSynIds (lookup_aux_id tidy_type_env)) patsyns
+ ; type_env2 = extendTypeEnvList type_env1
+ [AConLike (PatSynCon ps) | ps <- tidy_patsyns ]
+
+ ; tidy_type_env = tidyTypeEnv omit_prags type_env2
+
-- See Note [Injecting implicit bindings]
; all_tidy_binds = implicit_binds ++ tidy_binds
@@ -377,6 +394,13 @@ tidyProgram hsc_env (ModGuts { mg_module = mod
(showSDoc dflags (ppr CoreTidy <+> ptext (sLit "rules")))
(pprRulesForUser tidy_rules)
+ -- When dumping to files, we might have markers in there
+ -- that we ought to convert into annotations of the Core tree
+ -- so debug info can refer to it.
+ ; final_binds <- if (gopt Opt_DumpToFile dflags)
+ then extractDumpMarkers dflags all_tidy_binds
+ else return all_tidy_binds
+
-- Print one-line size info
; let cs = coreBindsStats tidy_binds
; when (dopt Opt_D_dump_core_stats dflags)
@@ -389,7 +413,7 @@ tidyProgram hsc_env (ModGuts { mg_module = mod
; return (CgGuts { cg_module = mod,
cg_tycons = alg_tycons,
- cg_binds = all_tidy_binds,
+ cg_binds = final_binds,
cg_foreign = foreign_stubs,
cg_dep_pkgs = map fst $ dep_pkgs deps,
cg_hpc_info = hpc_info,
@@ -405,11 +429,11 @@ tidyProgram hsc_env (ModGuts { mg_module = mod
})
}
-lookup_dfun :: TypeEnv -> Var -> Id
-lookup_dfun type_env dfun_id
- = case lookupTypeEnv type_env (idName dfun_id) of
- Just (AnId dfun_id') -> dfun_id'
- _other -> pprPanic "lookup_dfun" (ppr dfun_id)
+lookup_aux_id :: TypeEnv -> Var -> Id
+lookup_aux_id type_env id
+ = case lookupTypeEnv type_env (idName id) of
+ Just (AnId id') -> id'
+ _other -> pprPanic "lookup_axu_id" (ppr id)
--------------------------
tidyTypeEnv :: Bool -- Compiling without -O, so omit prags
@@ -517,7 +541,7 @@ of exceptions, and finally I gave up the battle:
Note [Injecting implicit bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-We inject the implict bindings right at the end, in CoreTidy.
+We inject the implicit bindings right at the end, in CoreTidy.
Some of these bindings, notably record selectors, are not
constructed in an optimised form. E.g. record selector for
data T = MkT { x :: {-# UNPACK #-} !Int }
@@ -559,14 +583,16 @@ Oh: two other reasons for injecting them late:
There is one sort of implicit binding that is injected still later,
namely those for data constructor workers. Reason (I think): it's
really just a code generation trick.... binding itself makes no sense.
-See CorePrep Note [Data constructor workers].
+See Note [Data constructor workers] in CorePrep.
\begin{code}
getTyConImplicitBinds :: TyCon -> [CoreBind]
-getTyConImplicitBinds tc = map get_defn (mapCatMaybes dataConWrapId_maybe (tyConDataCons tc))
+getTyConImplicitBinds tc = map get_defn (mapMaybe dataConWrapId_maybe (tyConDataCons tc))
getClassImplicitBinds :: Class -> [CoreBind]
-getClassImplicitBinds cls = map get_defn (classAllSelIds cls)
+getClassImplicitBinds cls
+ = [ NonRec op (mkDictSelRhs cls val_index)
+ | (op, val_index) <- classAllSelIds cls `zip` [0..] ]
get_defn :: Id -> CoreBind
get_defn id = NonRec id (unfoldingTemplate (realIdUnfolding id))
@@ -1006,7 +1032,7 @@ tidyTopBinds hsc_env this_mod unfold_env init_occ_env binds
------------------------
tidyTopBind :: DynFlags
- -> PackageId
+ -> PackageKey
-> Module
-> Id
-> UnfoldEnv
@@ -1176,7 +1202,7 @@ it as a CAF. In these cases however, we would need to use an additional
CAF list to keep track of non-collectable CAFs.
\begin{code}
-hasCafRefs :: DynFlags -> PackageId -> Module
+hasCafRefs :: DynFlags -> PackageKey -> Module
-> (Id, VarEnv Var) -> Arity -> CoreExpr
-> CafInfo
hasCafRefs dflags this_pkg this_mod p arity expr
@@ -1225,6 +1251,87 @@ cafRefsV (_, p) id
fastOr :: FastBool -> (a -> FastBool) -> a -> FastBool
-- hack for lazy-or over FastBool.
fastOr a f x = fastBool (isFastTrue a || isFastTrue (f x))
+
+--------------------------
+
+extractDumpMarkers :: DynFlags -> CoreProgram -> IO CoreProgram
+extractDumpMarkers dflags binds = do
+ dumps <- readIORef $ generatedDumps dflags
+ let extract binds file = do
+ marks <- extractDumpMarkersFile file
+ return $ map (annotateMarkers dflags marks file) binds
+ foldlM extract binds (elems dumps)
+
+extractDumpMarkersFile :: FilePath -> IO (Map.Map String Int)
+extractDumpMarkersFile dumpFile = do
+ (tmpFile, hout) <- openTempFile (takeDirectory dumpFile) (takeFileName dumpFile)
+
+ -- Open dump file, extract the markers
+ hin <- openFile dumpFile ReadMode
+ let extractMarkers n marks = do
+ end <- hIsEOF hin
+ if end then return marks else do
+ line <- BS.hGetLine hin
+ marks' <- extractMarksLine line n marks
+ hPutChar hout '\n'
+ extractMarkers (n+1) marks'
+ markStart = BS.pack "ann<#"
+ markEnd = BS.pack "#>"
+ extractMarksLine line n marks = do
+ -- Look for annotation start, output everything up to that point
+ let (pre,match) = BS.breakSubstring markStart line
+ BS.hPutStr hout pre
+ case () of
+ _ | BS.null match -> return marks
+ -- If the marker is within a quote, we jump over
+ -- it. This is really more a band-aid, as it's just
+ -- fixing one of the more obvious problems we might
+ -- run into trying to naively "parse" an undefined
+ -- format like this.
+ | openQuotes pre -> case BS.break (== '"') match of
+ (quoted, rest)
+ | BS.null rest -> BS.hPutStrLn hout quoted >> return marks
+ | otherwise -> do BS.hPutStr hout quoted
+ hPutChar hout '"'
+ extractMarksLine (BS.tail rest) n marks
+ | (body, rest) <- BS.breakSubstring markEnd (BS.drop (BS.length markStart) match) ->
+ let !marks' = Map.insert (BS.unpack body) n marks
+ in extractMarksLine (BS.drop (BS.length markEnd) rest) n marks'
+ openQuotes = BS.foldr' (\x -> if x == '"' then not else id) False
+ marks <- extractMarkers 1 Map.empty
+
+ -- Close files, replace old file with stripped one
+ hClose hin
+ hClose hout
+ renameFile tmpFile dumpFile
+ return $! marks
+
+annotateMarkers :: DynFlags -> Map.Map String Int -> FilePath -> CoreBind -> CoreBind
+annotateMarkers dflags marks file = annotBind
+ where annotBind (NonRec b e) = NonRec b $ annot b $ annotExpr e
+ annotBind (Rec bs) = Rec $ map (\(b, e) -> (b, annot b $ annotExpr e)) bs
+
+ annotExpr (App e1 e2) = App (annotExpr e1) (annotExpr e2)
+ annotExpr (Lam b e) = Lam b $ annot b $ annotExpr e
+ annotExpr (Let bs e) = Let (annotBind bs) (annotExpr e)
+ annotExpr (Case e b t as) = Case (annotExpr e) b t $ map (annotAlt b) as
+ annotExpr (Cast e c) = Cast (annotExpr e) c
+ annotExpr (Tick t e) = Tick t (annotExpr e)
+ annotExpr other = other
+
+ annotAlt b (con, bs, e) = (con, bs, annot (b, con) $ annotExpr e)
+
+ fileFS = mkFastString file
+ mkSpan n = realSrcLocSpan (mkRealSrcLoc fileFS n 1)
+
+ annot name expr =
+ let nameStr = showSDocDump dflags (ppr name)
+ in case Map.lookup nameStr marks of
+ Just n -> let ann (Lam b e) = Lam b (ann e)
+ ann other = Tick (SourceNote (mkSpan n) nameStr) other
+ in ann expr
+ Nothing -> expr
+
\end{code}
diff --git a/compiler/nativeGen/AsmCodeGen.lhs b/compiler/nativeGen/AsmCodeGen.lhs
index caae4b14093b..b1f12f5e051f 100644
--- a/compiler/nativeGen/AsmCodeGen.lhs
+++ b/compiler/nativeGen/AsmCodeGen.lhs
@@ -7,7 +7,8 @@
-- -----------------------------------------------------------------------------
\begin{code}
-{-# LANGUAGE GADTs #-}
+{-# LANGUAGE BangPatterns, CPP, GADTs, ScopedTypeVariables, UnboxedTuples #-}
+
module AsmCodeGen ( nativeCodeGen ) where
#include "HsVersions.h"
@@ -47,6 +48,8 @@ import Instruction
import PIC
import Reg
import NCGMonad
+import Dwarf
+import Debug
import BlockId
import CgUtils ( fixStgRegisters )
@@ -152,20 +155,21 @@ data NcgImpl statics instr jumpDest = NcgImpl {
}
--------------------
-nativeCodeGen :: DynFlags -> Module -> Handle -> UniqSupply
+nativeCodeGen :: DynFlags -> Module -> ModLocation -> Handle -> UniqSupply
-> Stream IO RawCmmGroup ()
-> IO UniqSupply
-nativeCodeGen dflags this_mod h us cmms
+nativeCodeGen dflags this_mod modLoc h us cmms
= let platform = targetPlatform dflags
nCG' :: (Outputable statics, Outputable instr, Instruction instr)
=> NcgImpl statics instr jumpDest -> IO UniqSupply
- nCG' ncgImpl = nativeCodeGen' dflags this_mod ncgImpl h us cmms
+ nCG' ncgImpl = nativeCodeGen' dflags this_mod modLoc ncgImpl h us cmms
in case platformArch platform of
ArchX86 -> nCG' (x86NcgImpl dflags)
ArchX86_64 -> nCG' (x86_64NcgImpl dflags)
ArchPPC -> nCG' (ppcNcgImpl dflags)
ArchSPARC -> nCG' (sparcNcgImpl dflags)
ArchARM {} -> panic "nativeCodeGen: No NCG for ARM"
+ ArchARM64 -> panic "nativeCodeGen: No NCG for ARM64"
ArchPPC_64 -> panic "nativeCodeGen: No NCG for PPC 64"
ArchAlpha -> panic "nativeCodeGen: No NCG for Alpha"
ArchMipseb -> panic "nativeCodeGen: No NCG for mipseb"
@@ -253,55 +257,48 @@ type NativeGenAcc statics instr
= ([[CLabel]],
[([NatCmmDecl statics instr],
Maybe [Color.RegAllocStats statics instr],
- Maybe [Linear.RegAllocStats])])
+ Maybe [Linear.RegAllocStats])],
+ [DebugBlock],
+ DwarfFiles)
nativeCodeGen' :: (Outputable statics, Outputable instr, Instruction instr)
=> DynFlags
- -> Module
+ -> Module -> ModLocation
-> NcgImpl statics instr jumpDest
-> Handle
-> UniqSupply
-> Stream IO RawCmmGroup ()
-> IO UniqSupply
-nativeCodeGen' dflags this_mod ncgImpl h us cmms
+nativeCodeGen' dflags this_mod modLoc ncgImpl h us cmms
= do
- let split_cmms = Stream.map add_split cmms
-- BufHandle is a performance hack. We could hide it inside
-- Pretty if it weren't for the fact that we do lots of little
-- printDocs here (in order to do codegen in constant space).
bufh <- newBufHandle h
- (ngs, us') <- cmmNativeGenStream dflags this_mod ncgImpl bufh us split_cmms ([], [])
- finishNativeGen dflags ncgImpl bufh ngs
-
- return us'
-
- where add_split tops
- | gopt Opt_SplitObjs dflags = split_marker : tops
- | otherwise = tops
-
- split_marker = CmmProc mapEmpty mkSplitMarkerLabel []
- (ofBlockList (panic "split_marker_entry") [])
-
+ (ngs, us') <- cmmNativeGenStream dflags this_mod modLoc ncgImpl bufh us cmms ([], [], [], emptyUFM)
+ finishNativeGen dflags modLoc bufh us' ngs
finishNativeGen :: Instruction instr
=> DynFlags
- -> NcgImpl statics instr jumpDest
+ -> ModLocation
-> BufHandle
+ -> UniqSupply
-> NativeGenAcc statics instr
- -> IO ()
-finishNativeGen dflags ncgImpl bufh@(BufHandle _ _ h) (imports, prof)
+ -> IO UniqSupply
+finishNativeGen dflags modLoc bufh@(BufHandle _ _ h) us (imports, prof, debugs, _)
= do
+ -- Write debug data and finish
+ let emitDwarf = gopt Opt_Debug dflags && not (gopt Opt_SplitObjs dflags)
+ us' <- if not emitDwarf then return us else do
+ (dwarf, us') <- dwarfGen dflags modLoc us debugs
+ emitNativeCode dflags bufh dwarf
+ return us'
bFlush bufh
let platform = targetPlatform dflags
let (native, colorStats, linearStats)
= unzip3 prof
- -- dump native code
- dumpIfSet_dyn dflags
- Opt_D_dump_asm "Asm code"
- (vcat $ map (pprNatCmmDecl ncgImpl) $ concat native)
-
-- dump global NCG stats for graph coloring allocator
(case concat $ catMaybes colorStats of
[] -> return ()
@@ -335,10 +332,11 @@ finishNativeGen dflags ncgImpl bufh@(BufHandle _ _ h) (imports, prof)
Pretty.printDoc Pretty.LeftMode (pprCols dflags) h
$ withPprStyleDoc dflags (mkCodeStyle AsmStyle)
$ makeImportsDoc dflags (concat imports)
+ return us'
cmmNativeGenStream :: (Outputable statics, Outputable instr, Instruction instr)
=> DynFlags
- -> Module
+ -> Module -> ModLocation
-> NcgImpl statics instr jumpDest
-> BufHandle
-> UniqSupply
@@ -346,78 +344,127 @@ cmmNativeGenStream :: (Outputable statics, Outputable instr, Instruction instr)
-> NativeGenAcc statics instr
-> IO (NativeGenAcc statics instr, UniqSupply)
-cmmNativeGenStream dflags this_mod ncgImpl h us cmm_stream ngs@(impAcc, profAcc)
+cmmNativeGenStream dflags this_mod modLoc ncgImpl h us cmm_stream ngs@(impAcc, profAcc, dbgs, fileIds)
= do r <- Stream.runStream cmm_stream
case r of
Left () ->
- return ((reverse impAcc, reverse profAcc) , us)
+ return ((reverse impAcc, reverse profAcc, dbgs, fileIds) , us)
Right (cmms, cmm_stream') -> do
- (ngs',us') <- cmmNativeGens dflags this_mod ncgImpl h us cmms ngs 0
- cmmNativeGenStream dflags this_mod ncgImpl h us' cmm_stream' ngs'
+
+ -- Generate debug information
+ let debugFlag = gopt Opt_Debug dflags
+ ndbgs | debugFlag = cmmDebugGen modLoc cmms
+ | otherwise = []
+ dbgMap = debugToMap ndbgs
+
+ -- Insert split marker, generate native code
+ let splitFlag = gopt Opt_SplitObjs dflags
+ cmms' | splitFlag = split_marker : cmms
+ | otherwise = cmms
+ ((impAcc', profAcc', dbgs', fileIds'), us')
+ <- cmmNativeGens dflags this_mod modLoc ncgImpl h dbgMap us cmms' ngs 0
+
+ -- Link native code information into debug blocks
+ let nats = map (\(ns, _, _) -> ns) profAcc'
+ !ldbgs = cmmDebugLink isMetaInstr (concat nats) ndbgs
+ dumpIfSet_dyn dflags Opt_D_dump_debug "Debug Infos" (vcat $ map ppr ldbgs)
+
+ -- Emit & clear DWARF information when generating split
+ -- object files, as we need it to land in the same object file
+ (dbgs'', fileIds'', us'') <-
+ if debugFlag && splitFlag
+ then do (dwarf, us'') <- dwarfGen dflags modLoc us ldbgs
+ emitNativeCode dflags h dwarf
+ return ([], emptyUFM, us'')
+ else return (dbgs' ++ ldbgs, fileIds', us')
+
+ -- Strip references to native code unless we want to dump it later
+ let dumpFlag = dopt Opt_D_dump_asm_stats dflags
+ profAcc'' | dumpFlag = profAcc'
+ | otherwise = map (\(_, a, b) -> ([], a, b)) profAcc'
+ seqList profAcc'' $ seqList dbgs'' $
+ cmmNativeGenStream dflags this_mod modLoc ncgImpl h us'' cmm_stream'
+ (impAcc', profAcc'', dbgs'', fileIds'')
+
+ where split_marker = CmmProc mapEmpty mkSplitMarkerLabel []
+ (ofBlockList (panic "split_marker_entry") [])
-- | Do native code generation on all these cmms.
--
cmmNativeGens :: (Outputable statics, Outputable instr, Instruction instr)
=> DynFlags
- -> Module
+ -> Module -> ModLocation
-> NcgImpl statics instr jumpDest
-> BufHandle
+ -> LabelMap DebugBlock
-> UniqSupply
-> [RawCmmDecl]
-> NativeGenAcc statics instr
-> Int
-> IO (NativeGenAcc statics instr, UniqSupply)
-cmmNativeGens _ _ _ _ us [] ngs _
+cmmNativeGens _ _ _ _ _ _ us [] ngs _
= return (ngs, us)
-cmmNativeGens dflags this_mod ncgImpl h us (cmm : cmms) (impAcc, profAcc) count
+cmmNativeGens dflags this_mod modLoc ncgImpl h dbgMap us (cmm : cmms) (impAcc, profAcc, debugs, fileIds) count
= do
- (us', native, imports, colorStats, linearStats)
- <- {-# SCC "cmmNativeGen" #-} cmmNativeGen dflags this_mod ncgImpl us cmm count
+ (us', fileIds', native, imports, colorStats, linearStats)
+ <- {-# SCC "cmmNativeGen" #-} cmmNativeGen dflags this_mod modLoc ncgImpl us fileIds dbgMap cmm count
- {-# SCC "pprNativeCode" #-} Pretty.bufLeftRender h
- $ withPprStyleDoc dflags (mkCodeStyle AsmStyle)
- $ vcat $ map (pprNatCmmDecl ncgImpl) native
+ let newFileIds = fileIds' `minusUFM` fileIds
+ pprDecl (f,n) = ptext (sLit "\t.file ") <> ppr n <+> doubleQuotes (ftext f)
- let !lsPprNative =
- if dopt Opt_D_dump_asm dflags
- || dopt Opt_D_dump_asm_stats dflags
- then native
- else []
+ emitNativeCode dflags h $ vcat $
+ map pprDecl (eltsUFM newFileIds) ++
+ map (pprNatCmmDecl ncgImpl) native
let !count' = count + 1
-- force evaluation all this stuff to avoid space leaks
{-# SCC "seqString" #-} evaluate $ seqString (showSDoc dflags $ vcat $ map ppr imports)
- cmmNativeGens dflags this_mod ncgImpl h
+ cmmNativeGens dflags this_mod modLoc ncgImpl h dbgMap
us' cmms ((imports : impAcc),
- ((lsPprNative, colorStats, linearStats) : profAcc))
+ ((native, colorStats, linearStats) : profAcc),
+ debugs, fileIds')
count'
where seqString [] = ()
seqString (x:xs) = x `seq` seqString xs
+emitNativeCode :: DynFlags -> BufHandle -> SDoc -> IO ()
+emitNativeCode dflags h sdoc = do
+
+ {-# SCC "pprNativeCode" #-} Pretty.bufLeftRender h
+ $ withPprStyleDoc dflags (mkCodeStyle AsmStyle) sdoc
+
+ -- dump native code
+ dumpIfSet_dyn dflags
+ Opt_D_dump_asm "Asm code"
+ sdoc
+
-- | Complete native code generation phase for a single top-level chunk of Cmm.
-- Dumping the output of each stage along the way.
-- Global conflict graph and NGC stats
cmmNativeGen
:: (Outputable statics, Outputable instr, Instruction instr)
=> DynFlags
- -> Module
+ -> Module -> ModLocation
-> NcgImpl statics instr jumpDest
-> UniqSupply
+ -> DwarfFiles
+ -> LabelMap DebugBlock
-> RawCmmDecl -- ^ the cmm to generate code for
-> Int -- ^ sequence number of this top thing
-> IO ( UniqSupply
+ , DwarfFiles
, [NatCmmDecl statics instr] -- native code
, [CLabel] -- things imported by this cmm
, Maybe [Color.RegAllocStats statics instr] -- stats for the coloring register allocator
, Maybe [Linear.RegAllocStats]) -- stats for the linear register allocators
-cmmNativeGen dflags this_mod ncgImpl us cmm count
+cmmNativeGen dflags this_mod modLoc ncgImpl us fileIds dbgMap cmm count
= do
let platform = targetPlatform dflags
@@ -436,9 +483,9 @@ cmmNativeGen dflags this_mod ncgImpl us cmm count
(pprCmmGroup [opt_cmm])
-- generate native code from cmm
- let ((native, lastMinuteImports), usGen) =
+ let ((native, lastMinuteImports, fileIds'), usGen) =
{-# SCC "genMachCode" #-}
- initUs us $ genMachCode dflags this_mod (cmmTopCodeGen ncgImpl) opt_cmm
+ initUs us $ genMachCode dflags this_mod modLoc (cmmTopCodeGen ncgImpl) fileIds dbgMap opt_cmm
dumpIfSet_dyn dflags
Opt_D_dump_asm_native "Native code"
@@ -569,6 +616,7 @@ cmmNativeGen dflags this_mod ncgImpl us cmm count
(vcat $ map (pprNatCmmDecl ncgImpl) expanded)
return ( usAlloc
+ , fileIds'
, expanded
, lastMinuteImports ++ imports
, ppr_raStatsColor
@@ -604,7 +652,7 @@ makeImportsDoc dflags imports
then text ".section .note.GNU-stack,\"\",@progbits"
else empty)
$$
- -- And just because every other compiler does, lets stick in
+ -- And just because every other compiler does, let's stick in
-- an identifier directive: .ident "GHC x.y.z"
(if platformHasIdentDirective platform
then let compilerIdent = text "GHC" <+> text cProjectVersion
@@ -824,21 +872,24 @@ apply_mapping ncgImpl ufm (CmmProc info lbl live (ListGraph blocks))
genMachCode
:: DynFlags
- -> Module
+ -> Module -> ModLocation
-> (RawCmmDecl -> NatM [NatCmmDecl statics instr])
+ -> DwarfFiles
+ -> LabelMap DebugBlock
-> RawCmmDecl
-> UniqSM
( [NatCmmDecl statics instr]
- , [CLabel])
+ , [CLabel]
+ , DwarfFiles)
-genMachCode dflags this_mod cmmTopCodeGen cmm_top
+genMachCode dflags this_mod modLoc cmmTopCodeGen fileIds dbgMap cmm_top
= do { initial_us <- getUs
- ; let initial_st = mkNatM_State initial_us 0 dflags this_mod
+ ; let initial_st = mkNatM_State initial_us 0 dflags this_mod modLoc fileIds dbgMap
(new_tops, final_st) = initNat initial_st (cmmTopCodeGen cmm_top)
final_delta = natm_delta final_st
final_imports = natm_imports final_st
; if final_delta == 0
- then return (new_tops, final_imports)
+ then return (new_tops, final_imports, natm_fileid final_st)
else pprPanic "genMachCode: nonzero final delta" (int final_delta)
}
@@ -1023,15 +1074,15 @@ cmmExprNative referenceKind expr = do
CmmReg (CmmGlobal EagerBlackholeInfo)
| arch == ArchPPC && not (gopt Opt_PIC dflags)
-> cmmExprNative referenceKind $
- CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_EAGER_BLACKHOLE_info")))
+ CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageKey (fsLit "__stg_EAGER_BLACKHOLE_info")))
CmmReg (CmmGlobal GCEnter1)
| arch == ArchPPC && not (gopt Opt_PIC dflags)
-> cmmExprNative referenceKind $
- CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_enter_1")))
+ CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageKey (fsLit "__stg_gc_enter_1")))
CmmReg (CmmGlobal GCFun)
| arch == ArchPPC && not (gopt Opt_PIC dflags)
-> cmmExprNative referenceKind $
- CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_fun")))
+ CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageKey (fsLit "__stg_gc_fun")))
other
-> return other
diff --git a/compiler/nativeGen/CPrim.hs b/compiler/nativeGen/CPrim.hs
index a6f4cab7bd45..c52fe10b13d7 100644
--- a/compiler/nativeGen/CPrim.hs
+++ b/compiler/nativeGen/CPrim.hs
@@ -1,11 +1,18 @@
-- | Generating C symbol names emitted by the compiler.
module CPrim
- ( popCntLabel
+ ( atomicReadLabel
+ , atomicWriteLabel
+ , atomicRMWLabel
+ , cmpxchgLabel
+ , popCntLabel
, bSwapLabel
+ , clzLabel
+ , ctzLabel
, word2FloatLabel
) where
import CmmType
+import CmmMachOp
import Outputable
popCntLabel :: Width -> String
@@ -25,9 +32,70 @@ bSwapLabel w = "hs_bswap" ++ pprWidth w
pprWidth W64 = "64"
pprWidth w = pprPanic "bSwapLabel: Unsupported word width " (ppr w)
+clzLabel :: Width -> String
+clzLabel w = "hs_clz" ++ pprWidth w
+ where
+ pprWidth W8 = "8"
+ pprWidth W16 = "16"
+ pprWidth W32 = "32"
+ pprWidth W64 = "64"
+ pprWidth w = pprPanic "clzLabel: Unsupported word width " (ppr w)
+
+ctzLabel :: Width -> String
+ctzLabel w = "hs_ctz" ++ pprWidth w
+ where
+ pprWidth W8 = "8"
+ pprWidth W16 = "16"
+ pprWidth W32 = "32"
+ pprWidth W64 = "64"
+ pprWidth w = pprPanic "ctzLabel: Unsupported word width " (ppr w)
+
word2FloatLabel :: Width -> String
word2FloatLabel w = "hs_word2float" ++ pprWidth w
where
pprWidth W32 = "32"
pprWidth W64 = "64"
pprWidth w = pprPanic "word2FloatLabel: Unsupported word width " (ppr w)
+
+atomicRMWLabel :: Width -> AtomicMachOp -> String
+atomicRMWLabel w amop = "hs_atomic_" ++ pprFunName amop ++ pprWidth w
+ where
+ pprWidth W8 = "8"
+ pprWidth W16 = "16"
+ pprWidth W32 = "32"
+ pprWidth W64 = "64"
+ pprWidth w = pprPanic "atomicRMWLabel: Unsupported word width " (ppr w)
+
+ pprFunName AMO_Add = "add"
+ pprFunName AMO_Sub = "sub"
+ pprFunName AMO_And = "and"
+ pprFunName AMO_Nand = "nand"
+ pprFunName AMO_Or = "or"
+ pprFunName AMO_Xor = "xor"
+
+cmpxchgLabel :: Width -> String
+cmpxchgLabel w = "hs_cmpxchg" ++ pprWidth w
+ where
+ pprWidth W8 = "8"
+ pprWidth W16 = "16"
+ pprWidth W32 = "32"
+ pprWidth W64 = "64"
+ pprWidth w = pprPanic "cmpxchgLabel: Unsupported word width " (ppr w)
+
+atomicReadLabel :: Width -> String
+atomicReadLabel w = "hs_atomicread" ++ pprWidth w
+ where
+ pprWidth W8 = "8"
+ pprWidth W16 = "16"
+ pprWidth W32 = "32"
+ pprWidth W64 = "64"
+ pprWidth w = pprPanic "atomicReadLabel: Unsupported word width " (ppr w)
+
+atomicWriteLabel :: Width -> String
+atomicWriteLabel w = "hs_atomicwrite" ++ pprWidth w
+ where
+ pprWidth W8 = "8"
+ pprWidth W16 = "16"
+ pprWidth W32 = "32"
+ pprWidth W64 = "64"
+ pprWidth w = pprPanic "atomicWriteLabel: Unsupported word width " (ppr w)
diff --git a/compiler/nativeGen/Dwarf.hs b/compiler/nativeGen/Dwarf.hs
new file mode 100644
index 000000000000..8ab6c6e7559a
--- /dev/null
+++ b/compiler/nativeGen/Dwarf.hs
@@ -0,0 +1,164 @@
+
+module Dwarf (
+ dwarfGen
+ ) where
+
+import CLabel
+import CmmExpr ( GlobalReg(..) )
+import Config ( cProjectName, cProjectVersion )
+import CoreSyn ( Tickish(..) )
+import Debug
+import DynFlags
+import FastString
+import Module
+import Outputable
+import Platform
+import Unique
+import UniqSupply
+
+import Dwarf.Constants
+import Dwarf.Types
+
+import Data.Maybe
+import Data.List ( sortBy )
+import Data.Ord ( comparing )
+import qualified Data.Map as Map
+import System.FilePath
+import System.Directory ( getCurrentDirectory )
+
+import qualified Compiler.Hoopl as H
+
+-- | Generate DWARF/debug information
+dwarfGen :: DynFlags -> ModLocation -> UniqSupply -> [DebugBlock]
+ -> IO (SDoc, UniqSupply)
+dwarfGen df modLoc us blocks = do
+
+ -- Convert debug data structures to DWARF info records
+ let procs = debugSplitProcs blocks
+ compPath <- getCurrentDirectory
+ let dwarfUnit = DwarfCompileUnit
+ { dwChildren = map (procToDwarf df) procs
+ , dwName = fromMaybe "" (ml_hs_file modLoc)
+ , dwCompDir = addTrailingPathSeparator compPath
+ , dwProducer = cProjectName ++ " " ++ cProjectVersion
+ , dwLineLabel = dwarfLineLabel
+ }
+
+ -- Check whether we have any source code information, so we do not
+ -- end up writing a pointer to an empty .debug_line section
+ -- (dsymutil on Mac OS gets confused by this).
+ let haveSrcIn blk = isJust (dblSourceTick blk) && isJust (dblPosition blk)
+ || any haveSrcIn (dblBlocks blk)
+ haveSrc = any haveSrcIn procs
+
+ -- .debug_abbrev section: Declare the format we're using
+ let abbrevSct = pprAbbrevDecls haveSrc
+
+ -- .debug_info section: Information records on procedures and blocks
+ let (unitU, us') = takeUniqFromSupply us
+ infoSct = vcat [ dwarfInfoSection
+ , compileUnitHeader unitU
+ , pprDwarfInfo haveSrc dwarfUnit
+ , compileUnitFooter unitU
+ ]
+
+ -- .debug_line section: Generated mainly by the assembler, but we need to label it
+ let lineSct = dwarfLineSection $$
+ ptext dwarfLineLabel <> colon
+
+ -- .debug_frame section: Information about the layout of the GHC stack
+ let (framesU, us'') = takeUniqFromSupply us'
+ frameSct = dwarfFrameSection $$
+ ptext dwarfFrameLabel <> colon $$
+ pprDwarfFrame (debugFrame framesU procs)
+
+ -- .debug_ghc section: debug data in eventlog format (GHC-specific, obviously)
+ evData <- writeDebugToEventlog df modLoc blocks
+ let ghcSct = dwarfGhcSection $$
+ pprBuffer evData
+
+ return (infoSct $$ abbrevSct $$ lineSct $$ frameSct $$ ghcSct, us'')
+
+-- | Header for a compilation unit, establishing global format
+-- parameters
+compileUnitHeader :: Unique -> SDoc
+compileUnitHeader unitU = sdocWithPlatform $ \plat ->
+ let cuLabel = mkAsmTempLabel unitU
+ length = ppr (mkAsmTempEndLabel cuLabel) <> char '-' <> ppr cuLabel
+ in vcat [ ptext (sLit "\t.long ") <> length -- compilation unit size
+ , ppr cuLabel <> colon
+ , ptext (sLit "\t.word 3") -- DWARF version
+ , pprDwWord (ptext dwarfAbbrevLabel <> char '-' <>
+ ptext dwarfAbbrevLabel) -- pointer to our abbrevs
+ , ptext (sLit "\t.byte ") <> ppr (platformWordSize plat) -- word size
+ ]
+
+-- | Compilation unit footer, mainly establishing size of debug sections
+compileUnitFooter :: Unique -> SDoc
+compileUnitFooter unitU =
+ let cuEndLabel = mkAsmTempEndLabel $ mkAsmTempLabel unitU
+ in ppr cuEndLabel <> colon
+
+-- | Splits the blocks by procedures. In the result all nested blocks
+-- will come from the same procedure as the top-level block.
+debugSplitProcs :: [DebugBlock] -> [DebugBlock]
+debugSplitProcs b = concat $ H.mapElems $ mapMerges $ map split b
+ where mapMerges = foldr (H.mapUnionWithKey (const (++))) H.mapEmpty
+ split :: DebugBlock -> H.LabelMap [DebugBlock]
+ split blk = H.mapInsert (dblProcedure blk) [blk {dblBlocks = own_blks}] blks
+ where own_blks = fromMaybe [] $ H.mapLookup (dblProcedure blk) blks
+ blks = mapMerges $ map split $ dblBlocks blk
+ -- Note we are invalidating the tick tree here. We could fix
+ -- it - but then again we don't actually care about dblTicks here.
+
+-- | Generate DWARF info for a procedure debug block
+procToDwarf :: DynFlags -> DebugBlock -> DwarfInfo
+procToDwarf df prc
+ = DwarfSubprogram { dwChildren = foldr blockToDwarf [] $ dblBlocks prc
+ , dwName = case dblSourceTick prc of
+ Just s@SourceNote{} -> sourceName s
+ _otherwise -> showSDocDump df $ ppr $ dblLabel prc
+ , dwLabel = dblCLabel prc
+ }
+
+blockToDwarf :: DebugBlock -> [DwarfInfo] -> [DwarfInfo]
+blockToDwarf blk dws
+ | isJust (dblPosition blk) = dw : dws
+ | otherwise = nested ++ dws -- block was optimized out, flatten
+ where nested = foldr blockToDwarf [] $ dblBlocks blk
+ dw = DwarfBlock { dwChildren = nested
+ , dwLabel = dblCLabel blk
+ , dwMarker = mkAsmTempLabel (dblLabel blk)
+ }
+
+-- | Generates the data for the debug frame section, which encodes the
+-- desired stack unwind behaviour for the debugger
+debugFrame :: Unique -> [DebugBlock] -> DwarfFrame
+debugFrame u procs
+ = DwarfFrame { dwCieLabel = mkAsmTempLabel u
+ , dwCieInit = initUws
+ , dwCieProcs = map (procToFrame initUws) procs
+ }
+ where initUws = Map.fromList [(Sp, UwReg Sp 0)]
+
+-- | Generates unwind information for a procedure debug block
+procToFrame :: UnwindTable -> DebugBlock -> DwarfFrameProc
+procToFrame initUws blk
+ = DwarfFrameProc { dwFdeProc = dblCLabel blk
+ , dwFdeHasInfo = dblHasInfoTbl blk
+ , dwFdeBlocks = map (uncurry blockToFrame) blockUws
+ }
+ where blockUws :: [(DebugBlock, UnwindTable)]
+ blockUws = map snd $ sortBy (comparing fst) $ flatten initUws blk
+ flatten uws0 b@DebugBlock{dblPosition=pos, dblUnwind=uws, dblBlocks=blocks }
+ | Just p <- pos = (p, (b, uws')):nested
+ | otherwise = nested -- block was optimized out
+ where uws' = uws `Map.union` uws0
+ nested = concatMap (flatten uws') blocks
+
+blockToFrame :: DebugBlock -> UnwindTable -> DwarfFrameBlock
+blockToFrame blk uws
+ = DwarfFrameBlock { dwFdeBlock = mkAsmTempLabel $ dblLabel blk
+ , dwFdeBlkHasInfo = dblHasInfoTbl blk
+ , dwFdeUnwind = uws
+ }
diff --git a/compiler/nativeGen/Dwarf/Constants.hs b/compiler/nativeGen/Dwarf/Constants.hs
new file mode 100644
index 000000000000..8a4fde415bd5
--- /dev/null
+++ b/compiler/nativeGen/Dwarf/Constants.hs
@@ -0,0 +1,193 @@
+
+-- | Constants describing the DWARF format. Most of this simply
+-- mirrors /usr/include/dwarf.h.
+
+module Dwarf.Constants where
+
+import FastString
+import Outputable
+import Platform
+
+import Reg
+import X86.Regs
+
+import Data.Word
+
+-- | Language ID used for Haskell.
+dW_LANG_Haskell :: Word
+dW_LANG_Haskell = 0x18
+ -- Thanks to Nathan Howell for getting us our very own language ID!
+
+-- | Dwarf tags
+dW_TAG_compile_unit, dW_TAG_subroutine_type,
+ dW_TAG_file_type, dW_TAG_subprogram, dW_TAG_lexical_block,
+ dW_TAG_base_type, dW_TAG_structure_type, dW_TAG_pointer_type,
+ dW_TAG_array_type, dW_TAG_subrange_type, dW_TAG_typedef,
+ dW_TAG_variable, dW_TAG_arg_variable, dW_TAG_auto_variable :: Word
+dW_TAG_array_type = 1
+dW_TAG_lexical_block = 11
+dW_TAG_pointer_type = 15
+dW_TAG_compile_unit = 17
+dW_TAG_structure_type = 19
+dW_TAG_typedef = 22
+dW_TAG_subroutine_type = 32
+dW_TAG_subrange_type = 33
+dW_TAG_base_type = 36
+dW_TAG_file_type = 41
+dW_TAG_subprogram = 46
+dW_TAG_variable = 52
+dW_TAG_auto_variable = 256
+dW_TAG_arg_variable = 257
+
+-- | Dwarf attributes
+dW_AT_name, dW_AT_stmt_list, dW_AT_low_pc, dW_AT_high_pc, dW_AT_language,
+ dW_AT_comp_dir, dW_AT_producer, dW_AT_external, dW_AT_frame_base,
+ dW_AT_MIPS_linkage_name :: Word
+dW_AT_name = 0x03
+dW_AT_stmt_list = 0x10
+dW_AT_low_pc = 0x11
+dW_AT_high_pc = 0x12
+dW_AT_language = 0x13
+dW_AT_comp_dir = 0x1b
+dW_AT_producer = 0x25
+dW_AT_external = 0x3f
+dW_AT_frame_base = 0x40
+dW_AT_MIPS_linkage_name = 0x2007
+
+-- | Abbrev declaration
+dW_CHILDREN_no, dW_CHILDREN_yes :: Word8
+dW_CHILDREN_no = 0
+dW_CHILDREN_yes = 1
+
+dW_FORM_addr, dW_FORM_data4, dW_FORM_string, dW_FORM_flag,
+ dW_FORM_block1, dW_FORM_ref4 :: Word
+dW_FORM_addr = 0x01
+dW_FORM_data4 = 0x06
+dW_FORM_string = 0x08
+dW_FORM_flag = 0x0c
+dW_FORM_block1 = 0x0a
+dW_FORM_ref4 = 0x13
+
+-- | Dwarf native types
+dW_ATE_address, dW_ATE_boolean, dW_ATE_float, dW_ATE_signed,
+ dW_ATE_signed_char, dW_ATE_unsigned, dW_ATE_unsigned_char :: Word
+dW_ATE_address = 1
+dW_ATE_boolean = 2
+dW_ATE_float = 4
+dW_ATE_signed = 5
+dW_ATE_signed_char = 6
+dW_ATE_unsigned = 7
+dW_ATE_unsigned_char = 8
+
+-- | Call frame information
+dW_CFA_set_loc, dW_CFA_undefined, dW_CFA_same_value,
+ dW_CFA_def_cfa, dW_CFA_def_cfa_offset, dW_CFA_def_cfa_expression,
+ dW_CFA_expression, dW_CFA_offset_extended_sf, dW_CFA_def_cfa_offset_sf,
+ dW_CFA_def_cfa_sf, dW_CFA_val_offset, dW_CFA_val_expression,
+ dW_CFA_offset :: Word8
+dW_CFA_set_loc = 0x01
+dW_CFA_undefined = 0x07
+dW_CFA_same_value = 0x08
+dW_CFA_def_cfa = 0x0c
+dW_CFA_def_cfa_offset = 0x0e
+dW_CFA_def_cfa_expression = 0x0f
+dW_CFA_expression = 0x10
+dW_CFA_offset_extended_sf = 0x11
+dW_CFA_def_cfa_sf = 0x12
+dW_CFA_def_cfa_offset_sf = 0x13
+dW_CFA_val_offset = 0x14
+dW_CFA_val_expression = 0x16
+dW_CFA_offset = 0x80
+
+-- | Operations
+dW_OP_deref, dW_OP_consts,
+ dW_OP_minus, dW_OP_mul, dW_OP_plus,
+ dW_OP_lit0, dW_OP_breg0, dW_OP_call_frame_cfa :: Word8
+dW_OP_deref = 0x06
+dW_OP_consts = 0x11
+dW_OP_minus = 0x1c
+dW_OP_mul = 0x1e
+dW_OP_plus = 0x22
+dW_OP_lit0 = 0x30
+dW_OP_breg0 = 0x70
+dW_OP_call_frame_cfa = 0x9c
+
+-- | Dwarf section declarations
+dwarfInfoSection, dwarfAbbrevSection, dwarfLineSection,
+ dwarfFrameSection, dwarfGhcSection :: SDoc
+dwarfInfoSection = dwarfSection "info"
+dwarfAbbrevSection = dwarfSection "abbrev"
+dwarfLineSection = dwarfSection "line"
+dwarfFrameSection = dwarfSection "frame"
+dwarfGhcSection = dwarfSection "ghc"
+
+dwarfSection :: String -> SDoc
+dwarfSection name = sdocWithPlatform $ \plat ->
+ case platformOS plat of
+ OSDarwin -> ftext $ mkFastString $ ".section __DWARF,__debug_" ++ name ++ ",regular,debug"
+ _other -> ftext $ mkFastString $ ".section .debug_" ++ name ++ ",\"\",@progbits"
+
+-- | Dwarf section labels
+dwarfInfoLabel, dwarfAbbrevLabel, dwarfLineLabel, dwarfFrameLabel :: LitString
+dwarfInfoLabel = sLit ".Lsection_info"
+dwarfAbbrevLabel = sLit ".Lsection_abbrev"
+dwarfLineLabel = sLit ".Lsection_line"
+dwarfFrameLabel = sLit ".Lsection_frame"
+
+-- | Mapping of registers to DWARF register numbers
+dwarfRegNo :: Platform -> Reg -> Word8
+dwarfRegNo p r = case platformArch p of
+ ArchX86
+ | r == eax -> 0
+ | r == ecx -> 1 -- yes, no typo
+ | r == edx -> 2
+ | r == ebx -> 3
+ | r == esp -> 4
+ | r == ebp -> 5
+ | r == esi -> 6
+ | r == edi -> 7
+ ArchX86_64
+ | r == rax -> 0
+ | r == rdx -> 1 -- this neither. The order GCC allocates registers in?
+ | r == rcx -> 2
+ | r == rbx -> 3
+ | r == rsi -> 4
+ | r == rdi -> 5
+ | r == rbp -> 6
+ | r == rsp -> 7
+ | r == r8 -> 8
+ | r == r9 -> 9
+ | r == r10 -> 10
+ | r == r11 -> 11
+ | r == r12 -> 12
+ | r == r13 -> 13
+ | r == r14 -> 14
+ | r == r15 -> 15
+ | r == xmm0 -> 17
+ | r == xmm1 -> 18
+ | r == xmm2 -> 19
+ | r == xmm3 -> 20
+ | r == xmm4 -> 21
+ | r == xmm5 -> 22
+ | r == xmm6 -> 23
+ | r == xmm7 -> 24
+ | r == xmm8 -> 25
+ | r == xmm9 -> 26
+ | r == xmm10 -> 27
+ | r == xmm11 -> 28
+ | r == xmm12 -> 29
+ | r == xmm13 -> 30
+ | r == xmm14 -> 31
+ | r == xmm15 -> 32
+ _other -> error "dwarfRegNo: Unsupported platform or unknown register!"
+
+-- | Virtual register number to use for return address.
+dwarfReturnRegNo :: Platform -> Word8
+dwarfReturnRegNo p
+ -- We "overwrite" IP with our pseudo register - that makes sense, as
+ -- when using this mechanism gdb already knows the IP anyway. Clang
+ -- does this too, so it must be safe.
+ = case platformArch p of
+ ArchX86 -> 8 -- eip
+ ArchX86_64 -> 16 -- rip
+ _other -> error "dwarfReturnRegNo: Unsupported platform!"
diff --git a/compiler/nativeGen/Dwarf/Types.hs b/compiler/nativeGen/Dwarf/Types.hs
new file mode 100644
index 000000000000..992569109d7b
--- /dev/null
+++ b/compiler/nativeGen/Dwarf/Types.hs
@@ -0,0 +1,446 @@
+
+module Dwarf.Types
+ ( -- * Dwarf information
+ DwarfInfo(..)
+ , pprDwarfInfo
+ , pprAbbrevDecls
+ -- * Dwarf frame
+ , DwarfFrame(..), DwarfFrameProc(..), DwarfFrameBlock(..)
+ , pprDwarfFrame
+ -- * Utilities
+ , pprByte
+ , pprData4'
+ , pprDwWord
+ , pprWord
+ , pprLEBWord
+ , pprLEBInt
+ , wordAlign
+ , pprBuffer
+ )
+ where
+
+import Binary
+import Debug
+import CLabel
+import CmmExpr ( GlobalReg(..) )
+import FastString
+import Outputable
+import Platform
+import Reg
+
+import Dwarf.Constants
+
+import Data.Bits
+import Data.List ( mapAccumL )
+import qualified Data.Map as Map
+import Data.Word
+import Data.Char
+
+import CodeGen.Platform
+
+import Foreign
+
+import qualified System.IO.Unsafe as Unsafe
+
+-- | Individual dwarf records
+data DwarfInfo
+ = DwarfCompileUnit { dwChildren :: [DwarfInfo]
+ , dwName :: String
+ , dwProducer :: String
+ , dwCompDir :: String
+ , dwLineLabel :: LitString }
+ | DwarfSubprogram { dwChildren :: [DwarfInfo]
+ , dwName :: String
+ , dwLabel :: CLabel }
+ | DwarfBlock { dwChildren :: [DwarfInfo]
+ , dwLabel :: CLabel
+ , dwMarker :: CLabel }
+
+-- | Abbreviation codes used in dwarf file
+data DwarfAbbrev
+ = DwAbbrNull -- ^ Pseudo, used for marking the end of lists
+ | DwAbbrCompileUnit
+ | DwAbbrSubprogram
+ | DwAbbrBlock
+ deriving (Eq, Enum)
+
+-- | Gives code to use in binary representation.
+abbrevToCode :: DwarfAbbrev -> Word
+abbrevToCode = fromIntegral . fromEnum
+
+-- | Abbreviation declaration. This explains the binary encoding we
+-- use for representing @DwarfInfo@.
+pprAbbrevDecls :: Bool -> SDoc
+pprAbbrevDecls haveDebugLine =
+ let mkAbbrev abbr tag chld flds =
+ let fld (tag, form) = pprLEBWord tag $$ pprLEBWord form
+ in pprLEBWord (abbrevToCode abbr) $$ pprLEBWord tag $$ pprByte chld $$
+ vcat (map fld flds) $$ pprByte 0 $$ pprByte 0
+ in dwarfAbbrevSection $$
+ ptext dwarfAbbrevLabel <> colon $$
+ mkAbbrev DwAbbrCompileUnit dW_TAG_compile_unit dW_CHILDREN_yes
+ ([ (dW_AT_name, dW_FORM_string)
+ , (dW_AT_producer, dW_FORM_string)
+ , (dW_AT_language, dW_FORM_data4)
+ , (dW_AT_comp_dir, dW_FORM_string)
+ ] ++
+ (if haveDebugLine
+ then [ (dW_AT_stmt_list, dW_FORM_data4) ]
+ else [])) $$
+ mkAbbrev DwAbbrSubprogram dW_TAG_subprogram dW_CHILDREN_yes
+ [ (dW_AT_name, dW_FORM_string)
+ , (dW_AT_MIPS_linkage_name, dW_FORM_string)
+ , (dW_AT_external, dW_FORM_flag)
+ , (dW_AT_low_pc, dW_FORM_addr)
+ , (dW_AT_high_pc, dW_FORM_addr)
+ , (dW_AT_frame_base, dW_FORM_block1)
+ ] $$
+ mkAbbrev DwAbbrBlock dW_TAG_lexical_block dW_CHILDREN_yes
+ [ (dW_AT_name, dW_FORM_string)
+ , (dW_AT_low_pc, dW_FORM_addr)
+ , (dW_AT_high_pc, dW_FORM_addr)
+ ]
+
+pprAbbrev :: DwarfAbbrev -> SDoc
+pprAbbrev = pprLEBWord . abbrevToCode
+
+pprString' :: SDoc -> SDoc
+pprString' str = ptext (sLit "\t.asciz \"") <> str <> char '"'
+
+pprString :: String -> SDoc
+pprString = pprString' . hcat . map escape
+ where escape '\\' = ptext (sLit "\\\\")
+ escape '\"' = ptext (sLit "\\\"")
+ escape '\n' = ptext (sLit "\\n")
+ escape c | isAscii c && isPrint c && c /= '?' -- silence trigraph warnings
+ = char c
+ | otherwise
+ = let ch = ord c
+ in char '\\' <>
+ char (intToDigit (ch `div` 64)) <>
+ char (intToDigit ((ch `div` 8) `mod` 8)) <>
+ char (intToDigit (ch `mod` 8))
+
+pprData4' :: SDoc -> SDoc
+pprData4' x = ptext (sLit "\t.long ") <> x
+
+pprData4 :: Word -> SDoc
+pprData4 = pprData4' . ppr
+
+pprDwWord :: SDoc -> SDoc
+pprDwWord = pprData4'
+
+-- | Machine-dependent word directive
+pprWord :: SDoc -> SDoc
+pprWord s = (<> s) . sdocWithPlatform $ \plat ->
+ case platformWordSize plat of
+ 4 -> ptext (sLit "\t.long ")
+ 8 -> ptext (sLit "\t.quad ")
+ n -> panic $ "pprWord: Unsupported target platform word length " ++ show n ++ "!"
+
+pprFlag :: Bool -> SDoc
+pprFlag True = ptext (sLit "\t.byte 0xff")
+pprFlag False = ptext (sLit "\t.byte 0")
+
+pprDwarfInfo :: Bool -> DwarfInfo -> SDoc
+pprDwarfInfo haveSrc d
+ = pprDwarfInfoOpen haveSrc d $$
+ vcat (map (pprDwarfInfo haveSrc) (dwChildren d)) $$
+ pprDwarfInfoClose
+
+-- | Prints assembler data corresponding to DWARF info records. Note
+-- that the binary format of this is parameterized in @abbrevDecls@ and
+-- has to be kept in sync.
+pprDwarfInfoOpen :: Bool -> DwarfInfo -> SDoc
+pprDwarfInfoOpen haveSrc (DwarfCompileUnit _ name producer compDir lineLbl) =
+ pprAbbrev DwAbbrCompileUnit
+ $$ pprString name
+ $$ pprString producer
+ $$ pprData4 dW_LANG_Haskell
+ $$ pprString compDir
+ $$ if haveSrc
+ then pprData4' (ptext lineLbl <> char '-' <> ptext dwarfLineLabel)
+ else empty
+pprDwarfInfoOpen _ (DwarfSubprogram _ name label) = sdocWithDynFlags $ \df ->
+ pprAbbrev DwAbbrSubprogram
+ $$ pprString name
+ $$ pprString (renderWithStyle df (ppr label) (mkCodeStyle CStyle))
+ $$ pprFlag (externallyVisibleCLabel label)
+ $$ pprWord (ppr label)
+ $$ pprWord (ppr $ mkAsmTempEndLabel label)
+ $$ pprByte 1
+ $$ pprByte dW_OP_call_frame_cfa
+pprDwarfInfoOpen _ (DwarfBlock _ label marker) = sdocWithDynFlags $ \df ->
+ pprAbbrev DwAbbrBlock
+ $$ pprString (renderWithStyle df (ppr label) (mkCodeStyle CStyle))
+ $$ pprWord (ppr marker)
+ $$ pprWord (ppr $ mkAsmTempEndLabel marker)
+
+pprDwarfInfoClose :: SDoc
+pprDwarfInfoClose = pprAbbrev DwAbbrNull
+
+-- | Generate code for emitting the given buffer. Will take care to
+-- escape it appropriately.
+pprBuffer :: (Int, ForeignPtr Word8) -> SDoc
+pprBuffer (len, buf) = Unsafe.unsafePerformIO $ do
+
+ -- As we output a string, we need to do escaping. We approximate
+ -- here that the escaped string will have double the size of the
+ -- original buffer. That should be plenty of space given the fact
+ -- that we expect to be converting a lot of text.
+ bh <- openBinMem (len * 2)
+ let go p q | p == q = return ()
+ | otherwise = peek p >>= escape . fromIntegral >> go (p `plusPtr` 1) q
+ escape c
+ | c == ord '\\' = putB '\\' >> putB '\\'
+ | c == ord '\"' = putB '\\' >> putB '"'
+ | c == ord '\n' = putB '\\' >> putB 'n'
+ | c == ord '?' = putB '\\' >> putB '?' -- silence trigraph warnings
+ | isAscii (chr c) && isPrint (chr c)
+ = putByte bh (fromIntegral c)
+ | otherwise = do putB '\\'
+ putB $ intToDigit (c `div` 64)
+ putB $ intToDigit ((c `div` 8) `mod` 8)
+ putB $ intToDigit (c `mod` 8)
+ putB :: Char -> IO ()
+ putB = putByte bh . fromIntegral . ord
+ {-# INLINE putB #-}
+ withForeignPtr buf $ \p ->
+ go p (p `plusPtr` len)
+
+ -- Pack result into a string
+ (elen, ebuf) <- getBinMemBuf bh
+ buf <- withForeignPtr ebuf $ \p -> mkFastStringForeignPtr p ebuf elen
+
+ return $ ptext (sLit "\t.ascii ") <> doubleQuotes (ftext buf)
+
+-- | Information about unwind instructions for a procedure. This
+-- corresponds to a "Common Information Entry" (CIE) in DWARF.
+data DwarfFrame
+ = DwarfFrame
+ { dwCieLabel :: CLabel
+ , dwCieInit :: UnwindTable
+ , dwCieProcs :: [DwarfFrameProc]
+ }
+
+-- | Unwind instructions for an individual procedure. Corresponds to a
+-- "Frame Description Entry" (FDE) in DWARF.
+data DwarfFrameProc
+ = DwarfFrameProc
+ { dwFdeProc :: CLabel
+ , dwFdeHasInfo :: Bool
+ , dwFdeBlocks :: [DwarfFrameBlock] -- ^ List of blocks. Order must match asm!
+ }
+
+-- | Unwind instructions for a block. Will become part of the
+-- containing FDE.
+data DwarfFrameBlock
+ = DwarfFrameBlock
+ { dwFdeBlock :: CLabel
+ , dwFdeBlkHasInfo :: Bool
+ , dwFdeUnwind :: UnwindTable
+ }
+
+-- | Header for the .debug_frame section. Here we emit the "Common
+-- Information Entry" record that establishes general call frame
+-- parameters and the default stack layout.
+pprDwarfFrame :: DwarfFrame -> SDoc
+pprDwarfFrame DwarfFrame{ dwCieLabel=cieLabel, dwCieInit=cieInit, dwCieProcs=procs }
+ = sdocWithPlatform $ \plat ->
+ let cieStartLabel= mkAsmTempDerivedLabel cieLabel (fsLit "_start")
+ cieEndLabel = mkAsmTempEndLabel cieLabel
+ length = ppr cieEndLabel <> char '-' <> ppr cieStartLabel
+ spReg = dwarfGlobalRegNo plat Sp
+ retReg = dwarfReturnRegNo plat
+ wordSize = platformWordSize plat
+ pprInit (g, uw) = pprSetUnwind plat g (Nothing, uw)
+ in vcat [ ppr cieLabel <> colon
+ , pprData4' length -- Length of CIE
+ , ppr cieStartLabel <> colon
+ , pprData4' (ptext (sLit "-1"))
+ -- Common Information Entry marker (-1 = 0xf..f)
+ , pprByte 3 -- CIE version (we require DWARF 3)
+ , pprByte 0 -- Augmentation (none)
+ , pprByte 1 -- Code offset multiplicator
+ , pprByte (128-fromIntegral wordSize)
+ -- Data offset multiplicator
+ -- (stacks grow downwards => "-w" in signed LEB128)
+ , pprByte retReg -- virtual register holding return address
+ ] $$
+ -- Initial unwind table
+ vcat (map pprInit $ Map.toList cieInit) $$
+ vcat [ -- RET = *CFA
+ pprByte (dW_CFA_offset+retReg)
+ , pprByte 0
+
+ -- Sp' = CFA
+ -- (we need to set this manually as our Sp register is
+ -- often not the architecture's default stack register)
+ , pprByte dW_CFA_val_offset
+ , pprLEBWord (fromIntegral spReg)
+ , pprLEBWord 0
+ ] $$
+ wordAlign $$
+ ppr cieEndLabel <> colon $$
+ -- Procedure unwind tables
+ vcat (map (pprFrameProc cieLabel cieInit) procs)
+
+-- | Writes a "Frame Description Entry" for a procedure. This consists
+-- mainly of referencing the CIE and writing state machine
+-- instructions to describe how the frame base (CFA) changes.
+pprFrameProc :: CLabel -> UnwindTable -> DwarfFrameProc -> SDoc
+pprFrameProc frameLbl initUw (DwarfFrameProc procLbl hasInfo blocks)
+ = let fdeLabel = mkAsmTempDerivedLabel procLbl (fsLit "_fde")
+ fdeEndLabel = mkAsmTempDerivedLabel procLbl (fsLit "_fde_end")
+ procEnd = mkAsmTempEndLabel procLbl
+ ifInfo str = if hasInfo then text str else empty -- see [Note: Info Offset]
+ in vcat [ pprData4' (ppr fdeEndLabel <> char '-' <> ppr fdeLabel)
+ , ppr fdeLabel <> colon
+ , pprData4' (ppr frameLbl <> char '-' <>
+ ptext dwarfFrameLabel) -- Reference to CIE
+ , pprWord (ppr procLbl <> ifInfo "-1") -- Code pointer
+ , pprWord (ppr procEnd <> char '-' <>
+ ppr procLbl <> ifInfo "+1") -- Block byte length
+ ] $$
+ vcat (snd $ mapAccumL pprFrameBlock initUw blocks) $$
+ wordAlign $$
+ ppr fdeEndLabel <> colon
+
+-- | Generates unwind information for a block. We only generate
+-- instructions where unwind information actually changes. This small
+-- optimisation saves a lot of space, as subsequent blocks often have
+-- the same unwind information.
+pprFrameBlock :: UnwindTable -> DwarfFrameBlock -> (UnwindTable, SDoc)
+pprFrameBlock oldUws (DwarfFrameBlock blockLbl hasInfo uws)
+ | uws == oldUws
+ = (oldUws, empty)
+ | otherwise
+ = (,) uws $ sdocWithPlatform $ \plat ->
+ let lbl = ppr blockLbl <> if hasInfo then text "-1" else empty -- see [Note: Info Offset]
+ isChanged g v | old == Just v = Nothing
+ | otherwise = Just (old, v)
+ where old = Map.lookup g oldUws
+ changed = Map.toList $ Map.mapMaybeWithKey isChanged uws
+ died = Map.toList $ Map.difference oldUws uws
+ in pprByte dW_CFA_set_loc $$ pprWord lbl $$
+ vcat (map (uncurry $ pprSetUnwind plat) changed) $$
+ vcat (map (pprUndefUnwind plat . fst) died)
+
+-- [Note: Info Offset]
+--
+-- GDB was pretty much written with C-like programs in mind, and as a
+-- result they assume that once you have a return address, it is a
+-- good idea to look at (PC-1) to unwind further - as that's where the
+-- "call" instruction is supposed to be.
+--
+-- Now on one hand, code generated by GHC looks nothing like what GDB
+-- expects, and in fact going up from a return pointer is guaranteed
+-- to land us inside an info table! On the other hand, that actually
+-- gives us some wiggle room, as we expect IP to never *actually* end
+-- up inside the info table, so we can "cheat" by putting whatever GDB
+-- expects to see there. This is probably pretty safe, as GDB cannot
+-- assume (PC-1) to be a valid code pointer in the first place - and I
+-- have seen no code trying to correct this.
+--
+-- Note that this will not prevent GDB from failing to look-up the
+-- correct function name for the frame, as that uses the symbol table,
+-- which we can not manipulate as easily.
+
+dwarfGlobalRegNo :: Platform -> GlobalReg -> Word8
+dwarfGlobalRegNo plat = maybe 0 (dwarfRegNo plat . RegReal) . globalRegMaybe plat
+
+-- | Generate code for setting the unwind information for a register,
+-- optimized using its known old value in the table. Note that "Sp" is
+-- special: We see it as synonym for the CFA.
+pprSetUnwind :: Platform -> GlobalReg -> (Maybe UnwindExpr, UnwindExpr) -> SDoc
+pprSetUnwind _ Sp (Just (UwReg s _), UwReg s' o') | s == s'
+ = if o' >= 0
+ then pprByte dW_CFA_def_cfa_offset $$ pprLEBWord (fromIntegral o')
+ else pprByte dW_CFA_def_cfa_offset_sf $$ pprLEBInt o'
+pprSetUnwind plat Sp (_, UwReg s' o')
+ = if o' >= 0
+ then pprByte dW_CFA_def_cfa $$
+ pprLEBWord (fromIntegral $ dwarfGlobalRegNo plat s') $$
+ pprLEBWord (fromIntegral o')
+ else pprByte dW_CFA_def_cfa_sf $$
+ pprLEBWord (fromIntegral $ dwarfGlobalRegNo plat s') $$
+ pprLEBInt o'
+pprSetUnwind _ Sp (_, uw)
+ = pprByte dW_CFA_def_cfa_expression $$ pprUnwindExpr False uw
+pprSetUnwind plat g (_, UwDeref (UwReg Sp o))
+ | o < 0 && ((-o) `mod` platformWordSize plat) == 0 -- expected case
+ = pprByte (dW_CFA_offset + dwarfGlobalRegNo plat g) $$
+ pprLEBWord (fromIntegral ((-o) `div` platformWordSize plat))
+ | otherwise
+ = pprByte dW_CFA_offset_extended_sf $$
+ pprLEBWord (fromIntegral (dwarfGlobalRegNo plat g)) $$
+ pprLEBInt o
+pprSetUnwind plat g (_, UwDeref uw)
+ = pprByte dW_CFA_expression $$
+ pprLEBWord (fromIntegral (dwarfGlobalRegNo plat g)) $$
+ pprUnwindExpr True uw
+pprSetUnwind plat g (_, uw)
+ = pprByte dW_CFA_val_expression $$
+ pprLEBWord (fromIntegral (dwarfGlobalRegNo plat g)) $$
+ pprUnwindExpr True uw
+
+-- | Generates a DWARF expression for the given unwind expression. If
+-- @spIsCFA@ is true, we see @Sp@ as the frame base CFA where it gets
+-- mentioned.
+pprUnwindExpr :: Bool -> UnwindExpr -> SDoc
+pprUnwindExpr spIsCFA expr
+ = sdocWithPlatform $ \plat ->
+ let ppr (UwConst i)
+ | i >= 0 && i < 32 = pprByte (dW_OP_lit0 + fromIntegral i)
+ | otherwise = pprByte dW_OP_consts $$ pprLEBInt i -- the lazy way
+ ppr (UwReg Sp i) | spIsCFA
+ = if i == 0
+ then pprByte dW_OP_call_frame_cfa
+ else ppr (UwPlus (UwReg Sp 0) (UwConst i))
+ ppr (UwReg g i) = pprByte (dW_OP_breg0 + fromIntegral (dwarfGlobalRegNo plat g)) $$
+ pprLEBInt i
+ ppr (UwDeref u) = ppr u $$ pprByte dW_OP_deref
+ ppr (UwPlus u1 u2) = ppr u1 $$ ppr u2 $$ pprByte dW_OP_plus
+ ppr (UwMinus u1 u2) = ppr u1 $$ ppr u2 $$ pprByte dW_OP_minus
+ ppr (UwTimes u1 u2) = ppr u1 $$ ppr u2 $$ pprByte dW_OP_mul
+ in ptext (sLit "\t.byte 1f-.-1") $$
+ ppr expr $$
+ ptext (sLit "1:")
+
+-- | Generate code for re-setting the unwind information for a
+-- register to "undefined"
+pprUndefUnwind :: Platform -> GlobalReg -> SDoc
+pprUndefUnwind _ Sp = panic "pprUndefUnwind Sp" -- should never happen
+pprUndefUnwind plat g = pprByte dW_CFA_undefined $$
+ pprLEBWord (fromIntegral $ dwarfGlobalRegNo plat g)
+
+
+
+wordAlign :: SDoc
+wordAlign = sdocWithPlatform $ \plat ->
+ ptext (sLit "\t.align ") <> case platformOS plat of
+ OSDarwin -> case platformWordSize plat of
+ 8 -> text "3"
+ 4 -> text "2"
+ _other -> error "wordAlign: Unsupported word size!"
+ _other -> ppr (platformWordSize plat)
+
+pprByte :: Word8 -> SDoc
+pprByte x = ptext (sLit "\t.byte ") <> ppr (fromIntegral x :: Word)
+
+-- | Prints a number in "little endian base 128" format. The idea is
+-- to optimize for small numbers by stopping once all further bytes
+-- would be 0. The highest bit in every byte signals whether there
+-- are further bytes to read.
+pprLEBWord :: Word -> SDoc
+pprLEBWord x | x < 128 = pprByte (fromIntegral x)
+ | otherwise = pprByte (fromIntegral $ 128 .|. (x .&. 127)) $$
+ pprLEBWord (x `shiftR` 7)
+
+-- | Same as @pprLEBWord@, but for a signed number
+pprLEBInt :: Int -> SDoc
+pprLEBInt x | x >= -64 && x < 64
+ = pprByte (fromIntegral (x .&. 127))
+ | otherwise = pprByte (fromIntegral $ 128 .|. (x .&. 127)) $$
+ pprLEBInt (x `shiftR` 7)
+
diff --git a/compiler/nativeGen/NCGMonad.hs b/compiler/nativeGen/NCGMonad.hs
index 3ee3af2ea94d..3af3d3d36ff8 100644
--- a/compiler/nativeGen/NCGMonad.hs
+++ b/compiler/nativeGen/NCGMonad.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
@@ -23,7 +25,12 @@ module NCGMonad (
getNewRegPairNat,
getPicBaseMaybeNat,
getPicBaseNat,
- getDynFlags
+ getDynFlags,
+ getModLoc,
+ getFileId,
+ getDebugBlock,
+
+ DwarfFiles
)
where
@@ -36,6 +43,9 @@ import TargetReg
import BlockId
import CLabel ( CLabel, mkAsmTempLabel )
+import Debug
+import FastString ( FastString )
+import UniqFM
import UniqSupply
import Unique ( Unique )
import DynFlags
@@ -44,6 +54,8 @@ import Module
import Control.Monad ( liftM, ap )
import Control.Applicative ( Applicative(..) )
+import Compiler.Hoopl ( LabelMap, Label )
+
data NatM_State
= NatM_State {
natm_us :: UniqSupply,
@@ -51,15 +63,20 @@ data NatM_State
natm_imports :: [(CLabel)],
natm_pic :: Maybe Reg,
natm_dflags :: DynFlags,
- natm_this_module :: Module
+ natm_this_module :: Module,
+ natm_modloc :: ModLocation,
+ natm_fileid :: DwarfFiles,
+ natm_debug_map :: LabelMap DebugBlock
}
+type DwarfFiles = UniqFM (FastString, Int)
+
newtype NatM result = NatM (NatM_State -> (result, NatM_State))
unNat :: NatM a -> NatM_State -> (a, NatM_State)
unNat (NatM a) = a
-mkNatM_State :: UniqSupply -> Int -> DynFlags -> Module -> NatM_State
+mkNatM_State :: UniqSupply -> Int -> DynFlags -> Module -> ModLocation -> DwarfFiles -> LabelMap DebugBlock -> NatM_State
mkNatM_State us delta dflags this_mod
= NatM_State us delta [] Nothing dflags this_mod
@@ -170,3 +187,18 @@ getPicBaseNat rep
-> do
reg <- getNewRegNat rep
NatM (\state -> (reg, state { natm_pic = Just reg }))
+
+getModLoc :: NatM ModLocation
+getModLoc
+ = NatM $ \ st -> (natm_modloc st, st)
+
+getFileId :: FastString -> NatM Int
+getFileId f = NatM $ \st ->
+ case lookupUFM (natm_fileid st) f of
+ Just (_,n) -> (n, st)
+ Nothing -> let n = 1 + sizeUFM (natm_fileid st)
+ fids = addToUFM (natm_fileid st) f (f,n)
+ in n `seq` fids `seq` (n, st { natm_fileid = fids })
+
+getDebugBlock :: Label -> NatM (Maybe DebugBlock)
+getDebugBlock l = NatM $ \st -> (mapLookup l (natm_debug_map st), st)
diff --git a/compiler/nativeGen/PIC.hs b/compiler/nativeGen/PIC.hs
index f988bf73c77c..9b5c080b5b03 100644
--- a/compiler/nativeGen/PIC.hs
+++ b/compiler/nativeGen/PIC.hs
@@ -728,9 +728,10 @@ initializePicBase_ppc ArchPPC os picReg
fetchPC (BasicBlock bID insns) =
BasicBlock bID (PPC.FETCHPC picReg
+ : PPC.ADDIS tmp picReg (PPC.HI offsetToOffset)
: PPC.LD PPC.archWordSize tmp
- (PPC.AddrRegImm picReg offsetToOffset)
- : PPC.ADD picReg picReg (PPC.RIReg tmp)
+ (PPC.AddrRegImm tmp (PPC.LO offsetToOffset))
+ : PPC.ADD picReg picReg (PPC.RIReg picReg)
: insns)
return (CmmProc info lab live (ListGraph blocks') : gotOffset : statics)
diff --git a/compiler/nativeGen/PPC/CodeGen.hs b/compiler/nativeGen/PPC/CodeGen.hs
index 3f0e7632f8ef..f4203c6f1b2b 100644
--- a/compiler/nativeGen/PPC/CodeGen.hs
+++ b/compiler/nativeGen/PPC/CodeGen.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP, GADTs #-}
-----------------------------------------------------------------------------
--
@@ -12,7 +13,6 @@
-- (c) the #if blah_TARGET_ARCH} things, the
-- structure should not be too overwhelming.
-{-# LANGUAGE GADTs #-}
module PPC.CodeGen (
cmmTopCodeGen,
generateJumpTableForInstr,
@@ -95,7 +95,8 @@ basicBlockCodeGen
, [NatCmmDecl CmmStatics Instr])
basicBlockCodeGen block = do
- let (CmmEntry id, nodes, tail) = blockSplit block
+ let (_, nodes, tail) = blockSplit block
+ id = entryLabel block
stmts = blockToList nodes
mid_instrs <- stmtsToInstrs stmts
tail_instrs <- stmtToInstrs tail
@@ -125,6 +126,8 @@ stmtToInstrs stmt = do
dflags <- getDynFlags
case stmt of
CmmComment s -> return (unitOL (COMMENT s))
+ CmmTick {} -> return nilOL
+ CmmUnwind {} -> return nilOL
CmmAssign reg src
| isFloatType ty -> assignReg_FltCode size reg src
@@ -813,15 +816,6 @@ genBranch = return . toOL . mkJumpInstr
Conditional jumps are always to local labels, so we can use branch
instructions. We peek at the arguments to decide what kind of
comparison to do.
-
-SPARC: First, we have to ensure that the condition codes are set
-according to the supplied comparison operation. We generate slightly
-different code for floating point comparisons, because a floating
-point operation cannot directly precede a @BF@. We assume the worst
-and fill that slot with a @NOP@.
-
-SPARC: Do not fill the delay slots here; you will confuse the register
-allocator.
-}
@@ -1160,6 +1154,12 @@ genCCall' dflags gcp target dest_regs args0
MO_BSwap w -> (fsLit $ bSwapLabel w, False)
MO_PopCnt w -> (fsLit $ popCntLabel w, False)
+ MO_Clz w -> (fsLit $ clzLabel w, False)
+ MO_Ctz w -> (fsLit $ ctzLabel w, False)
+ MO_AtomicRMW w amop -> (fsLit $ atomicRMWLabel w amop, False)
+ MO_Cmpxchg w -> (fsLit $ cmpxchgLabel w, False)
+ MO_AtomicRead w -> (fsLit $ atomicReadLabel w, False)
+ MO_AtomicWrite w -> (fsLit $ atomicWriteLabel w, False)
MO_S_QuotRem {} -> unsupported
MO_U_QuotRem {} -> unsupported
diff --git a/compiler/nativeGen/PPC/Cond.hs b/compiler/nativeGen/PPC/Cond.hs
index b8c5208c660b..0e4b1fd7014e 100644
--- a/compiler/nativeGen/PPC/Cond.hs
+++ b/compiler/nativeGen/PPC/Cond.hs
@@ -1,17 +1,9 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module PPC.Cond (
- Cond(..),
- condNegate,
- condUnsigned,
- condToSigned,
- condToUnsigned,
+ Cond(..),
+ condNegate,
+ condUnsigned,
+ condToSigned,
+ condToUnsigned,
)
where
@@ -19,18 +11,18 @@ where
import Panic
data Cond
- = ALWAYS
- | EQQ
- | GE
- | GEU
- | GTT
- | GU
- | LE
- | LEU
- | LTT
- | LU
- | NE
- deriving Eq
+ = ALWAYS
+ | EQQ
+ | GE
+ | GEU
+ | GTT
+ | GU
+ | LE
+ | LEU
+ | LTT
+ | LU
+ | NE
+ deriving Eq
condNegate :: Cond -> Cond
diff --git a/compiler/nativeGen/PPC/Instr.hs b/compiler/nativeGen/PPC/Instr.hs
index ddb9c51c7bbf..3756c649bbf8 100644
--- a/compiler/nativeGen/PPC/Instr.hs
+++ b/compiler/nativeGen/PPC/Instr.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Machine-dependent assembly language
diff --git a/compiler/nativeGen/PPC/RegInfo.hs b/compiler/nativeGen/PPC/RegInfo.hs
index 8b35d875736a..c4724d4193ea 100644
--- a/compiler/nativeGen/PPC/RegInfo.hs
+++ b/compiler/nativeGen/PPC/RegInfo.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Machine-specific parts of the register allocator
@@ -5,20 +7,12 @@
-- (c) The University of Glasgow 1996-2004
--
-----------------------------------------------------------------------------
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module PPC.RegInfo (
JumpDest( DestBlockId ), getJumpDestBlockId,
- canShortcut,
- shortcutJump,
+ canShortcut,
+ shortcutJump,
- shortcutStatics
+ shortcutStatics
)
where
@@ -68,14 +62,13 @@ shortcutStatic fn (CmmStaticLit (CmmLabelDiffOff lbl1 lbl2 off))
shortcutStatic _ other_static
= other_static
-shortBlockId
- :: (BlockId -> Maybe JumpDest)
- -> BlockId
- -> CLabel
+shortBlockId
+ :: (BlockId -> Maybe JumpDest)
+ -> BlockId
+ -> CLabel
shortBlockId fn blockid =
case fn blockid of
Nothing -> mkAsmTempLabel uq
Just (DestBlockId blockid') -> shortBlockId fn blockid'
where uq = getUnique blockid
-
diff --git a/compiler/nativeGen/PPC/Regs.hs b/compiler/nativeGen/PPC/Regs.hs
index f92351bd22a0..0f636bf64c16 100644
--- a/compiler/nativeGen/PPC/Regs.hs
+++ b/compiler/nativeGen/PPC/Regs.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1994-2004
diff --git a/compiler/nativeGen/Reg.hs b/compiler/nativeGen/Reg.hs
index fee74be35513..862306f0bba9 100644
--- a/compiler/nativeGen/Reg.hs
+++ b/compiler/nativeGen/Reg.hs
@@ -1,36 +1,27 @@
-
-- | An architecture independent description of a register.
--- This needs to stay architecture independent because it is used
--- by NCGMonad and the register allocators, which are shared
--- by all architectures.
+-- This needs to stay architecture independent because it is used
+-- by NCGMonad and the register allocators, which are shared
+-- by all architectures.
--
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module Reg (
- RegNo,
- Reg(..),
- regPair,
- regSingle,
- isRealReg, takeRealReg,
- isVirtualReg, takeVirtualReg,
-
- VirtualReg(..),
- renameVirtualReg,
- classOfVirtualReg,
- getHiVirtualRegFromLo,
- getHiVRegFromLo,
-
- RealReg(..),
- regNosOfRealReg,
- realRegsAlias,
-
- liftPatchFnToRegReg
+ RegNo,
+ Reg(..),
+ regPair,
+ regSingle,
+ isRealReg, takeRealReg,
+ isVirtualReg, takeVirtualReg,
+
+ VirtualReg(..),
+ renameVirtualReg,
+ classOfVirtualReg,
+ getHiVirtualRegFromLo,
+ getHiVRegFromLo,
+
+ RealReg(..),
+ regNosOfRealReg,
+ realRegsAlias,
+
+ liftPatchFnToRegReg
)
where
@@ -41,68 +32,68 @@ import RegClass
import Data.List
-- | An identifier for a primitive real machine register.
-type RegNo
- = Int
+type RegNo
+ = Int
-- VirtualRegs are virtual registers. The register allocator will
--- eventually have to map them into RealRegs, or into spill slots.
+-- eventually have to map them into RealRegs, or into spill slots.
--
--- VirtualRegs are allocated on the fly, usually to represent a single
--- value in the abstract assembly code (i.e. dynamic registers are
--- usually single assignment).
+-- VirtualRegs are allocated on the fly, usually to represent a single
+-- value in the abstract assembly code (i.e. dynamic registers are
+-- usually single assignment).
--
--- The single assignment restriction isn't necessary to get correct code,
--- although a better register allocation will result if single
--- assignment is used -- because the allocator maps a VirtualReg into
--- a single RealReg, even if the VirtualReg has multiple live ranges.
+-- The single assignment restriction isn't necessary to get correct code,
+-- although a better register allocation will result if single
+-- assignment is used -- because the allocator maps a VirtualReg into
+-- a single RealReg, even if the VirtualReg has multiple live ranges.
--
--- Virtual regs can be of either class, so that info is attached.
+-- Virtual regs can be of either class, so that info is attached.
--
data VirtualReg
- = VirtualRegI {-# UNPACK #-} !Unique
- | VirtualRegHi {-# UNPACK #-} !Unique -- High part of 2-word register
- | VirtualRegF {-# UNPACK #-} !Unique
- | VirtualRegD {-# UNPACK #-} !Unique
- | VirtualRegSSE {-# UNPACK #-} !Unique
- deriving (Eq, Show, Ord)
+ = VirtualRegI {-# UNPACK #-} !Unique
+ | VirtualRegHi {-# UNPACK #-} !Unique -- High part of 2-word register
+ | VirtualRegF {-# UNPACK #-} !Unique
+ | VirtualRegD {-# UNPACK #-} !Unique
+ | VirtualRegSSE {-# UNPACK #-} !Unique
+ deriving (Eq, Show, Ord)
instance Uniquable VirtualReg where
- getUnique reg
- = case reg of
- VirtualRegI u -> u
- VirtualRegHi u -> u
- VirtualRegF u -> u
- VirtualRegD u -> u
- VirtualRegSSE u -> u
+ getUnique reg
+ = case reg of
+ VirtualRegI u -> u
+ VirtualRegHi u -> u
+ VirtualRegF u -> u
+ VirtualRegD u -> u
+ VirtualRegSSE u -> u
instance Outputable VirtualReg where
- ppr reg
- = case reg of
- VirtualRegI u -> text "%vI_" <> pprUnique u
- VirtualRegHi u -> text "%vHi_" <> pprUnique u
- VirtualRegF u -> text "%vF_" <> pprUnique u
- VirtualRegD u -> text "%vD_" <> pprUnique u
- VirtualRegSSE u -> text "%vSSE_" <> pprUnique u
+ ppr reg
+ = case reg of
+ VirtualRegI u -> text "%vI_" <> pprUnique u
+ VirtualRegHi u -> text "%vHi_" <> pprUnique u
+ VirtualRegF u -> text "%vF_" <> pprUnique u
+ VirtualRegD u -> text "%vD_" <> pprUnique u
+ VirtualRegSSE u -> text "%vSSE_" <> pprUnique u
renameVirtualReg :: Unique -> VirtualReg -> VirtualReg
renameVirtualReg u r
= case r of
- VirtualRegI _ -> VirtualRegI u
- VirtualRegHi _ -> VirtualRegHi u
- VirtualRegF _ -> VirtualRegF u
- VirtualRegD _ -> VirtualRegD u
- VirtualRegSSE _ -> VirtualRegSSE u
+ VirtualRegI _ -> VirtualRegI u
+ VirtualRegHi _ -> VirtualRegHi u
+ VirtualRegF _ -> VirtualRegF u
+ VirtualRegD _ -> VirtualRegD u
+ VirtualRegSSE _ -> VirtualRegSSE u
classOfVirtualReg :: VirtualReg -> RegClass
classOfVirtualReg vr
= case vr of
- VirtualRegI{} -> RcInteger
- VirtualRegHi{} -> RcInteger
- VirtualRegF{} -> RcFloat
- VirtualRegD{} -> RcDouble
- VirtualRegSSE{} -> RcDoubleSSE
+ VirtualRegI{} -> RcInteger
+ VirtualRegHi{} -> RcInteger
+ VirtualRegF{} -> RcFloat
+ VirtualRegD{} -> RcDouble
+ VirtualRegSSE{} -> RcDoubleSSE
-- Determine the upper-half vreg for a 64-bit quantity on a 32-bit platform
@@ -111,118 +102,116 @@ classOfVirtualReg vr
getHiVirtualRegFromLo :: VirtualReg -> VirtualReg
getHiVirtualRegFromLo reg
= case reg of
- -- makes a pseudo-unique with tag 'H'
- VirtualRegI u -> VirtualRegHi (newTagUnique u 'H')
- _ -> panic "Reg.getHiVirtualRegFromLo"
+ -- makes a pseudo-unique with tag 'H'
+ VirtualRegI u -> VirtualRegHi (newTagUnique u 'H')
+ _ -> panic "Reg.getHiVirtualRegFromLo"
getHiVRegFromLo :: Reg -> Reg
getHiVRegFromLo reg
= case reg of
- RegVirtual vr -> RegVirtual (getHiVirtualRegFromLo vr)
- RegReal _ -> panic "Reg.getHiVRegFromLo"
-
+ RegVirtual vr -> RegVirtual (getHiVirtualRegFromLo vr)
+ RegReal _ -> panic "Reg.getHiVRegFromLo"
+
------------------------------------------------------------------------------------
-- | RealRegs are machine regs which are available for allocation, in
--- the usual way. We know what class they are, because that's part of
--- the processor's architecture.
+-- the usual way. We know what class they are, because that's part of
+-- the processor's architecture.
--
--- RealRegPairs are pairs of real registers that are allocated together
--- to hold a larger value, such as with Double regs on SPARC.
+-- RealRegPairs are pairs of real registers that are allocated together
+-- to hold a larger value, such as with Double regs on SPARC.
--
data RealReg
- = RealRegSingle {-# UNPACK #-} !RegNo
- | RealRegPair {-# UNPACK #-} !RegNo {-# UNPACK #-} !RegNo
- deriving (Eq, Show, Ord)
+ = RealRegSingle {-# UNPACK #-} !RegNo
+ | RealRegPair {-# UNPACK #-} !RegNo {-# UNPACK #-} !RegNo
+ deriving (Eq, Show, Ord)
instance Uniquable RealReg where
- getUnique reg
- = case reg of
- RealRegSingle i -> mkRegSingleUnique i
- RealRegPair r1 r2 -> mkRegPairUnique (r1 * 65536 + r2)
+ getUnique reg
+ = case reg of
+ RealRegSingle i -> mkRegSingleUnique i
+ RealRegPair r1 r2 -> mkRegPairUnique (r1 * 65536 + r2)
instance Outputable RealReg where
- ppr reg
- = case reg of
- RealRegSingle i -> text "%r" <> int i
- RealRegPair r1 r2 -> text "%r(" <> int r1 <> text "|" <> int r2 <> text ")"
+ ppr reg
+ = case reg of
+ RealRegSingle i -> text "%r" <> int i
+ RealRegPair r1 r2 -> text "%r(" <> int r1 <> text "|" <> int r2 <> text ")"
regNosOfRealReg :: RealReg -> [RegNo]
regNosOfRealReg rr
= case rr of
- RealRegSingle r1 -> [r1]
- RealRegPair r1 r2 -> [r1, r2]
-
+ RealRegSingle r1 -> [r1]
+ RealRegPair r1 r2 -> [r1, r2]
+
realRegsAlias :: RealReg -> RealReg -> Bool
realRegsAlias rr1 rr2
- = not $ null $ intersect (regNosOfRealReg rr1) (regNosOfRealReg rr2)
+ = not $ null $ intersect (regNosOfRealReg rr1) (regNosOfRealReg rr2)
--------------------------------------------------------------------------------
-- | A register, either virtual or real
data Reg
- = RegVirtual !VirtualReg
- | RegReal !RealReg
- deriving (Eq, Ord)
+ = RegVirtual !VirtualReg
+ | RegReal !RealReg
+ deriving (Eq, Ord)
regSingle :: RegNo -> Reg
-regSingle regNo = RegReal $ RealRegSingle regNo
+regSingle regNo = RegReal $ RealRegSingle regNo
regPair :: RegNo -> RegNo -> Reg
-regPair regNo1 regNo2 = RegReal $ RealRegPair regNo1 regNo2
+regPair regNo1 regNo2 = RegReal $ RealRegPair regNo1 regNo2
--- We like to have Uniques for Reg so that we can make UniqFM and UniqSets
+-- We like to have Uniques for Reg so that we can make UniqFM and UniqSets
-- in the register allocator.
instance Uniquable Reg where
- getUnique reg
- = case reg of
- RegVirtual vr -> getUnique vr
- RegReal rr -> getUnique rr
-
+ getUnique reg
+ = case reg of
+ RegVirtual vr -> getUnique vr
+ RegReal rr -> getUnique rr
+
-- | Print a reg in a generic manner
--- If you want the architecture specific names, then use the pprReg
--- function from the appropriate Ppr module.
+-- If you want the architecture specific names, then use the pprReg
+-- function from the appropriate Ppr module.
instance Outputable Reg where
- ppr reg
- = case reg of
- RegVirtual vr -> ppr vr
- RegReal rr -> ppr rr
+ ppr reg
+ = case reg of
+ RegVirtual vr -> ppr vr
+ RegReal rr -> ppr rr
isRealReg :: Reg -> Bool
-isRealReg reg
+isRealReg reg
= case reg of
- RegReal _ -> True
- RegVirtual _ -> False
+ RegReal _ -> True
+ RegVirtual _ -> False
takeRealReg :: Reg -> Maybe RealReg
takeRealReg reg
= case reg of
- RegReal rr -> Just rr
- _ -> Nothing
+ RegReal rr -> Just rr
+ _ -> Nothing
isVirtualReg :: Reg -> Bool
isVirtualReg reg
= case reg of
- RegReal _ -> False
- RegVirtual _ -> True
+ RegReal _ -> False
+ RegVirtual _ -> True
takeVirtualReg :: Reg -> Maybe VirtualReg
takeVirtualReg reg
= case reg of
- RegReal _ -> Nothing
- RegVirtual vr -> Just vr
+ RegReal _ -> Nothing
+ RegVirtual vr -> Just vr
-- | The patch function supplied by the allocator maps VirtualReg to RealReg
--- regs, but sometimes we want to apply it to plain old Reg.
+-- regs, but sometimes we want to apply it to plain old Reg.
--
liftPatchFnToRegReg :: (VirtualReg -> RealReg) -> (Reg -> Reg)
liftPatchFnToRegReg patchF reg
= case reg of
- RegVirtual vr -> RegReal (patchF vr)
- RegReal _ -> reg
-
-
+ RegVirtual vr -> RegReal (patchF vr)
+ RegReal _ -> reg
diff --git a/compiler/nativeGen/RegAlloc/Graph/Main.hs b/compiler/nativeGen/RegAlloc/Graph/Main.hs
index dbaf5098ceb8..05db68dd463e 100644
--- a/compiler/nativeGen/RegAlloc/Graph/Main.hs
+++ b/compiler/nativeGen/RegAlloc/Graph/Main.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE ScopedTypeVariables #-}
+
-- | Graph coloring register allocator.
module RegAlloc.Graph.Main (
regAlloc
diff --git a/compiler/nativeGen/RegAlloc/Graph/Stats.hs b/compiler/nativeGen/RegAlloc/Graph/Stats.hs
index 7bc842d1c9d3..8fada96ee25b 100644
--- a/compiler/nativeGen/RegAlloc/Graph/Stats.hs
+++ b/compiler/nativeGen/RegAlloc/Graph/Stats.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE BangPatterns, CPP #-}
-- | Carries interesting info for debugging / profiling of the
-- graph coloring register allocator.
diff --git a/compiler/nativeGen/RegAlloc/Graph/TrivColorable.hs b/compiler/nativeGen/RegAlloc/Graph/TrivColorable.hs
index df3c7d6d41f2..eba2e4314995 100644
--- a/compiler/nativeGen/RegAlloc/Graph/TrivColorable.hs
+++ b/compiler/nativeGen/RegAlloc/Graph/TrivColorable.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE BangPatterns, CPP #-}
module RegAlloc.Graph.TrivColorable (
trivColorable,
@@ -30,7 +30,7 @@ import Panic
-- (which are disjoint) ie. x86, x86_64 and ppc
--
-- The number of allocatable regs is hard coded in here so we can do
--- a fast comparision in trivColorable.
+-- a fast comparison in trivColorable.
--
-- It's ok if these numbers are _less_ than the actual number of free
-- regs, but they can't be more or the register conflict
@@ -113,6 +113,7 @@ trivColorable platform virtualRegSqueeze realRegSqueeze RcInteger conflicts excl
ArchSPARC -> 14
ArchPPC_64 -> panic "trivColorable ArchPPC_64"
ArchARM _ _ _ -> panic "trivColorable ArchARM"
+ ArchARM64 -> panic "trivColorable ArchARM64"
ArchAlpha -> panic "trivColorable ArchAlpha"
ArchMipseb -> panic "trivColorable ArchMipseb"
ArchMipsel -> panic "trivColorable ArchMipsel"
@@ -137,6 +138,7 @@ trivColorable platform virtualRegSqueeze realRegSqueeze RcFloat conflicts exclus
ArchSPARC -> 22
ArchPPC_64 -> panic "trivColorable ArchPPC_64"
ArchARM _ _ _ -> panic "trivColorable ArchARM"
+ ArchARM64 -> panic "trivColorable ArchARM64"
ArchAlpha -> panic "trivColorable ArchAlpha"
ArchMipseb -> panic "trivColorable ArchMipseb"
ArchMipsel -> panic "trivColorable ArchMipsel"
@@ -161,6 +163,7 @@ trivColorable platform virtualRegSqueeze realRegSqueeze RcDouble conflicts exclu
ArchSPARC -> 11
ArchPPC_64 -> panic "trivColorable ArchPPC_64"
ArchARM _ _ _ -> panic "trivColorable ArchARM"
+ ArchARM64 -> panic "trivColorable ArchARM64"
ArchAlpha -> panic "trivColorable ArchAlpha"
ArchMipseb -> panic "trivColorable ArchMipseb"
ArchMipsel -> panic "trivColorable ArchMipsel"
@@ -185,6 +188,7 @@ trivColorable platform virtualRegSqueeze realRegSqueeze RcDoubleSSE conflicts ex
ArchSPARC -> 0
ArchPPC_64 -> panic "trivColorable ArchPPC_64"
ArchARM _ _ _ -> panic "trivColorable ArchARM"
+ ArchARM64 -> panic "trivColorable ArchARM64"
ArchAlpha -> panic "trivColorable ArchAlpha"
ArchMipseb -> panic "trivColorable ArchMipseb"
ArchMipsel -> panic "trivColorable ArchMipsel"
diff --git a/compiler/nativeGen/RegAlloc/Linear/FreeRegs.hs b/compiler/nativeGen/RegAlloc/Linear/FreeRegs.hs
index 557d713fe36f..a1a00ba5829f 100644
--- a/compiler/nativeGen/RegAlloc/Linear/FreeRegs.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/FreeRegs.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE CPP #-}
module RegAlloc.Linear.FreeRegs (
FR(..),
@@ -74,6 +75,7 @@ maxSpillSlots dflags
ArchPPC -> PPC.Instr.maxSpillSlots dflags
ArchSPARC -> SPARC.Instr.maxSpillSlots dflags
ArchARM _ _ _ -> panic "maxSpillSlots ArchARM"
+ ArchARM64 -> panic "maxSpillSlots ArchARM64"
ArchPPC_64 -> panic "maxSpillSlots ArchPPC_64"
ArchAlpha -> panic "maxSpillSlots ArchAlpha"
ArchMipseb -> panic "maxSpillSlots ArchMipseb"
diff --git a/compiler/nativeGen/RegAlloc/Linear/Main.hs b/compiler/nativeGen/RegAlloc/Linear/Main.hs
index 6ac19dad40c7..9b4fe7b8a047 100644
--- a/compiler/nativeGen/RegAlloc/Linear/Main.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/Main.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
+
-----------------------------------------------------------------------------
--
-- The register allocator
@@ -156,11 +158,11 @@ regAlloc _ (CmmProc (LiveInfo info _ _ _) lbl live [])
, Nothing )
regAlloc dflags (CmmProc static lbl live sccs)
- | LiveInfo info (Just first_id) (Just block_live) _ <- static
+ | LiveInfo info entry_ids@(first_id:_) (Just block_live) _ <- static
= do
-- do register allocation on each component.
(final_blocks, stats, stack_use)
- <- linearRegAlloc dflags first_id block_live sccs
+ <- linearRegAlloc dflags entry_ids block_live sccs
-- make sure the block that was first in the input list
-- stays at the front of the output
@@ -194,45 +196,50 @@ regAlloc _ (CmmProc _ _ _ _)
linearRegAlloc
:: (Outputable instr, Instruction instr)
=> DynFlags
- -> BlockId -- ^ the first block
- -> BlockMap RegSet -- ^ live regs on entry to each basic block
- -> [SCC (LiveBasicBlock instr)] -- ^ instructions annotated with "deaths"
+ -> [BlockId] -- ^ entry points
+ -> BlockMap RegSet
+ -- ^ live regs on entry to each basic block
+ -> [SCC (LiveBasicBlock instr)]
+ -- ^ instructions annotated with "deaths"
-> UniqSM ([NatBasicBlock instr], RegAllocStats, Int)
-linearRegAlloc dflags first_id block_live sccs
- = let platform = targetPlatform dflags
- in case platformArch platform of
- ArchX86 -> linearRegAlloc' dflags (frInitFreeRegs platform :: X86.FreeRegs) first_id block_live sccs
- ArchX86_64 -> linearRegAlloc' dflags (frInitFreeRegs platform :: X86_64.FreeRegs) first_id block_live sccs
- ArchSPARC -> linearRegAlloc' dflags (frInitFreeRegs platform :: SPARC.FreeRegs) first_id block_live sccs
- ArchPPC -> linearRegAlloc' dflags (frInitFreeRegs platform :: PPC.FreeRegs) first_id block_live sccs
- ArchARM _ _ _ -> panic "linearRegAlloc ArchARM"
- ArchPPC_64 -> panic "linearRegAlloc ArchPPC_64"
- ArchAlpha -> panic "linearRegAlloc ArchAlpha"
- ArchMipseb -> panic "linearRegAlloc ArchMipseb"
- ArchMipsel -> panic "linearRegAlloc ArchMipsel"
+linearRegAlloc dflags entry_ids block_live sccs
+ = case platformArch platform of
+ ArchX86 -> go $ (frInitFreeRegs platform :: X86.FreeRegs)
+ ArchX86_64 -> go $ (frInitFreeRegs platform :: X86_64.FreeRegs)
+ ArchSPARC -> go $ (frInitFreeRegs platform :: SPARC.FreeRegs)
+ ArchPPC -> go $ (frInitFreeRegs platform :: PPC.FreeRegs)
+ ArchARM _ _ _ -> panic "linearRegAlloc ArchARM"
+ ArchARM64 -> panic "linearRegAlloc ArchARM64"
+ ArchPPC_64 -> panic "linearRegAlloc ArchPPC_64"
+ ArchAlpha -> panic "linearRegAlloc ArchAlpha"
+ ArchMipseb -> panic "linearRegAlloc ArchMipseb"
+ ArchMipsel -> panic "linearRegAlloc ArchMipsel"
ArchJavaScript -> panic "linearRegAlloc ArchJavaScript"
- ArchUnknown -> panic "linearRegAlloc ArchUnknown"
+ ArchUnknown -> panic "linearRegAlloc ArchUnknown"
+ where
+ go f = linearRegAlloc' dflags f entry_ids block_live sccs
+ platform = targetPlatform dflags
linearRegAlloc'
:: (FR freeRegs, Outputable instr, Instruction instr)
=> DynFlags
-> freeRegs
- -> BlockId -- ^ the first block
+ -> [BlockId] -- ^ entry points
-> BlockMap RegSet -- ^ live regs on entry to each basic block
-> [SCC (LiveBasicBlock instr)] -- ^ instructions annotated with "deaths"
-> UniqSM ([NatBasicBlock instr], RegAllocStats, Int)
-linearRegAlloc' dflags initFreeRegs first_id block_live sccs
+linearRegAlloc' dflags initFreeRegs entry_ids block_live sccs
= do us <- getUs
let (_, stack, stats, blocks) =
runR dflags emptyBlockMap initFreeRegs emptyRegMap (emptyStackMap dflags) us
- $ linearRA_SCCs first_id block_live [] sccs
+ $ linearRA_SCCs entry_ids block_live [] sccs
return (blocks, stats, getStackUse stack)
linearRA_SCCs :: (FR freeRegs, Instruction instr, Outputable instr)
- => BlockId
+ => [BlockId]
-> BlockMap RegSet
-> [NatBasicBlock instr]
-> [SCC (LiveBasicBlock instr)]
@@ -241,16 +248,16 @@ linearRA_SCCs :: (FR freeRegs, Instruction instr, Outputable instr)
linearRA_SCCs _ _ blocksAcc []
= return $ reverse blocksAcc
-linearRA_SCCs first_id block_live blocksAcc (AcyclicSCC block : sccs)
+linearRA_SCCs entry_ids block_live blocksAcc (AcyclicSCC block : sccs)
= do blocks' <- processBlock block_live block
- linearRA_SCCs first_id block_live
+ linearRA_SCCs entry_ids block_live
((reverse blocks') ++ blocksAcc)
sccs
-linearRA_SCCs first_id block_live blocksAcc (CyclicSCC blocks : sccs)
+linearRA_SCCs entry_ids block_live blocksAcc (CyclicSCC blocks : sccs)
= do
- blockss' <- process first_id block_live blocks [] (return []) False
- linearRA_SCCs first_id block_live
+ blockss' <- process entry_ids block_live blocks [] (return []) False
+ linearRA_SCCs entry_ids block_live
(reverse (concat blockss') ++ blocksAcc)
sccs
@@ -267,7 +274,7 @@ linearRA_SCCs first_id block_live blocksAcc (CyclicSCC blocks : sccs)
-}
process :: (FR freeRegs, Instruction instr, Outputable instr)
- => BlockId
+ => [BlockId]
-> BlockMap RegSet
-> [GenBasicBlock (LiveInstr instr)]
-> [GenBasicBlock (LiveInstr instr)]
@@ -278,7 +285,7 @@ process :: (FR freeRegs, Instruction instr, Outputable instr)
process _ _ [] [] accum _
= return $ reverse accum
-process first_id block_live [] next_round accum madeProgress
+process entry_ids block_live [] next_round accum madeProgress
| not madeProgress
{- BUGS: There are so many unreachable blocks in the code the warnings are overwhelming.
@@ -288,22 +295,22 @@ process first_id block_live [] next_round accum madeProgress
= return $ reverse accum
| otherwise
- = process first_id block_live
+ = process entry_ids block_live
next_round [] accum False
-process first_id block_live (b@(BasicBlock id _) : blocks)
+process entry_ids block_live (b@(BasicBlock id _) : blocks)
next_round accum madeProgress
= do
block_assig <- getBlockAssigR
if isJust (mapLookup id block_assig)
- || id == first_id
+ || id `elem` entry_ids
then do
b' <- processBlock block_live b
- process first_id block_live blocks
+ process entry_ids block_live blocks
next_round (b' : accum) True
- else process first_id block_live blocks
+ else process entry_ids block_live blocks
(b : next_round) accum madeProgress
@@ -394,9 +401,9 @@ raInsn _ new_instrs _ (LiveInstr ii Nothing)
= do setDeltaR n
return (new_instrs, [])
-raInsn _ new_instrs _ (LiveInstr ii Nothing)
+raInsn _ new_instrs _ (LiveInstr ii@(Instr i) Nothing)
| isMetaInstr ii
- = return (new_instrs, [])
+ = return (i : new_instrs, [])
raInsn block_live new_instrs id (LiveInstr (Instr instr) (Just live))
diff --git a/compiler/nativeGen/RegAlloc/Linear/PPC/FreeRegs.hs b/compiler/nativeGen/RegAlloc/Linear/PPC/FreeRegs.hs
index 0bdb49fb2ee5..b76fe79d7dbb 100644
--- a/compiler/nativeGen/RegAlloc/Linear/PPC/FreeRegs.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/PPC/FreeRegs.hs
@@ -1,4 +1,3 @@
-
-- | Free regs map for PowerPC
module RegAlloc.Linear.PPC.FreeRegs
where
diff --git a/compiler/nativeGen/RegAlloc/Linear/State.hs b/compiler/nativeGen/RegAlloc/Linear/State.hs
index dc499c9c1f62..39b5777ef32e 100644
--- a/compiler/nativeGen/RegAlloc/Linear/State.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/State.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE UnboxedTuples #-}
+
-- | State monad for the linear register allocator.
-- Here we keep all the state that the register allocator keeps track
diff --git a/compiler/nativeGen/RegAlloc/Liveness.hs b/compiler/nativeGen/RegAlloc/Liveness.hs
index 6dd4cec0de5b..d7fd8bdcb40b 100644
--- a/compiler/nativeGen/RegAlloc/Liveness.hs
+++ b/compiler/nativeGen/RegAlloc/Liveness.hs
@@ -1,3 +1,8 @@
+{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE TypeFamilies #-}
+
-----------------------------------------------------------------------------
--
-- The register liveness determinator
@@ -5,6 +10,7 @@
-- (c) The University of Glasgow 2004-2013
--
-----------------------------------------------------------------------------
+
module RegAlloc.Liveness (
RegSet,
RegMap, emptyRegMap,
@@ -163,10 +169,11 @@ data Liveness
-- | Stash regs live on entry to each basic block in the info part of the cmm code.
data LiveInfo
= LiveInfo
- (BlockEnv CmmStatics) -- cmm info table static stuff
- (Maybe BlockId) -- id of the first block
- (Maybe (BlockMap RegSet)) -- argument locals live on entry to this block
- (Map BlockId (Set Int)) -- stack slots live on entry to this block
+ (BlockEnv CmmStatics) -- cmm info table static stuff
+ [BlockId] -- entry points (first one is the
+ -- entry point for the proc).
+ (Maybe (BlockMap RegSet)) -- argument locals live on entry to this block
+ (Map BlockId (Set Int)) -- stack slots live on entry to this block
-- | A basic block with liveness information.
@@ -217,9 +224,9 @@ instance Outputable instr
| otherwise = name <> (hcat $ punctuate space $ map ppr $ uniqSetToList regs)
instance Outputable LiveInfo where
- ppr (LiveInfo mb_static firstId liveVRegsOnEntry liveSlotsOnEntry)
+ ppr (LiveInfo mb_static entryIds liveVRegsOnEntry liveSlotsOnEntry)
= (ppr mb_static)
- $$ text "# firstId = " <> ppr firstId
+ $$ text "# entryIds = " <> ppr entryIds
$$ text "# liveVRegsOnEntry = " <> ppr liveVRegsOnEntry
$$ text "# liveSlotsOnEntry = " <> text (show liveSlotsOnEntry)
@@ -474,7 +481,7 @@ stripLive dflags live
where stripCmm :: (Outputable statics, Outputable instr, Instruction instr)
=> LiveCmmDecl statics instr -> NatCmmDecl statics instr
stripCmm (CmmData sec ds) = CmmData sec ds
- stripCmm (CmmProc (LiveInfo info (Just first_id) _ _) label live sccs)
+ stripCmm (CmmProc (LiveInfo info (first_id:_) _ _) label live sccs)
= let final_blocks = flattenSCCs sccs
-- make sure the block that was first in the input list
@@ -487,7 +494,7 @@ stripLive dflags live
(ListGraph $ map (stripLiveBlock dflags) $ first' : rest')
-- procs used for stg_split_markers don't contain any blocks, and have no first_id.
- stripCmm (CmmProc (LiveInfo info Nothing _ _) label live [])
+ stripCmm (CmmProc (LiveInfo info [] _ _) label live [])
= CmmProc info label live (ListGraph [])
-- If the proc has blocks but we don't know what the first one was, then we're dead.
@@ -635,16 +642,19 @@ natCmmTopToLive (CmmData i d)
= CmmData i d
natCmmTopToLive (CmmProc info lbl live (ListGraph []))
- = CmmProc (LiveInfo info Nothing Nothing Map.empty) lbl live []
+ = CmmProc (LiveInfo info [] Nothing Map.empty) lbl live []
natCmmTopToLive proc@(CmmProc info lbl live (ListGraph blocks@(first : _)))
= let first_id = blockId first
- sccs = sccBlocks blocks (entryBlocks proc)
+ all_entry_ids = entryBlocks proc
+ sccs = sccBlocks blocks all_entry_ids
+ entry_ids = filter (/= first_id) all_entry_ids
sccsLive = map (fmap (\(BasicBlock l instrs) ->
BasicBlock l (map (\i -> LiveInstr (Instr i) Nothing) instrs)))
$ sccs
- in CmmProc (LiveInfo info (Just first_id) Nothing Map.empty) lbl live sccsLive
+ in CmmProc (LiveInfo info (first_id : entry_ids) Nothing Map.empty)
+ lbl live sccsLive
--
@@ -665,14 +675,20 @@ sccBlocks
sccBlocks blocks entries = map (fmap get_node) sccs
where
- sccs = stronglyConnCompFromG graph roots
-
- graph = graphFromEdgedVertices nodes
-
-- nodes :: [(NatBasicBlock instr, Unique, [Unique])]
nodes = [ (block, id, getOutEdges instrs)
| block@(BasicBlock id instrs) <- blocks ]
+ g1 = graphFromEdgedVertices nodes
+
+ reachable :: BlockSet
+ reachable = setFromList [ id | (_,id,_) <- reachablesG g1 roots ]
+
+ g2 = graphFromEdgedVertices [ node | node@(_,id,_) <- nodes
+ , id `setMember` reachable ]
+
+ sccs = stronglyConnCompG g2
+
get_node (n, _, _) = n
getOutEdges :: Instruction instr => [instr] -> [BlockId]
diff --git a/compiler/nativeGen/RegClass.hs b/compiler/nativeGen/RegClass.hs
index 7ccc0c1bec93..0c793173cbc0 100644
--- a/compiler/nativeGen/RegClass.hs
+++ b/compiler/nativeGen/RegClass.hs
@@ -1,41 +1,33 @@
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
-
-- | An architecture independent description of a register's class.
-module RegClass
- ( RegClass (..) )
+module RegClass
+ ( RegClass (..) )
where
-import Outputable
-import Unique
+import Outputable
+import Unique
--- | The class of a register.
--- Used in the register allocator.
--- We treat all registers in a class as being interchangable.
+-- | The class of a register.
+-- Used in the register allocator.
+-- We treat all registers in a class as being interchangable.
--
-data RegClass
- = RcInteger
- | RcFloat
- | RcDouble
- | RcDoubleSSE -- x86 only: the SSE regs are a separate class
- deriving Eq
+data RegClass
+ = RcInteger
+ | RcFloat
+ | RcDouble
+ | RcDoubleSSE -- x86 only: the SSE regs are a separate class
+ deriving Eq
instance Uniquable RegClass where
- getUnique RcInteger = mkRegClassUnique 0
- getUnique RcFloat = mkRegClassUnique 1
- getUnique RcDouble = mkRegClassUnique 2
+ getUnique RcInteger = mkRegClassUnique 0
+ getUnique RcFloat = mkRegClassUnique 1
+ getUnique RcDouble = mkRegClassUnique 2
getUnique RcDoubleSSE = mkRegClassUnique 3
instance Outputable RegClass where
- ppr RcInteger = Outputable.text "I"
- ppr RcFloat = Outputable.text "F"
- ppr RcDouble = Outputable.text "D"
- ppr RcDoubleSSE = Outputable.text "S"
+ ppr RcInteger = Outputable.text "I"
+ ppr RcFloat = Outputable.text "F"
+ ppr RcDouble = Outputable.text "D"
+ ppr RcDoubleSSE = Outputable.text "S"
diff --git a/compiler/nativeGen/SPARC/CodeGen.hs b/compiler/nativeGen/SPARC/CodeGen.hs
index 5d65b427e13a..6854479cf1aa 100644
--- a/compiler/nativeGen/SPARC/CodeGen.hs
+++ b/compiler/nativeGen/SPARC/CodeGen.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Generating machine code (instruction selection)
@@ -85,7 +87,8 @@ basicBlockCodeGen :: CmmBlock
, [NatCmmDecl CmmStatics Instr])
basicBlockCodeGen block = do
- let (CmmEntry id, nodes, tail) = blockSplit block
+ let (_, nodes, tail) = blockSplit block
+ id = entryLabel block
stmts = blockToList nodes
mid_instrs <- stmtsToInstrs stmts
tail_instrs <- stmtToInstrs tail
@@ -123,6 +126,8 @@ stmtToInstrs stmt = do
dflags <- getDynFlags
case stmt of
CmmComment s -> return (unitOL (COMMENT s))
+ CmmTick {} -> return nilOL
+ CmmUnwind {} -> return nilOL
CmmAssign reg src
| isFloatType ty -> assignReg_FltCode size reg src
@@ -652,6 +657,12 @@ outOfLineMachOp_table mop
MO_BSwap w -> fsLit $ bSwapLabel w
MO_PopCnt w -> fsLit $ popCntLabel w
+ MO_Clz w -> fsLit $ clzLabel w
+ MO_Ctz w -> fsLit $ ctzLabel w
+ MO_AtomicRMW w amop -> fsLit $ atomicRMWLabel w amop
+ MO_Cmpxchg w -> fsLit $ cmpxchgLabel w
+ MO_AtomicRead w -> fsLit $ atomicReadLabel w
+ MO_AtomicWrite w -> fsLit $ atomicWriteLabel w
MO_S_QuotRem {} -> unsupported
MO_U_QuotRem {} -> unsupported
diff --git a/compiler/nativeGen/SPARC/CodeGen/Amode.hs b/compiler/nativeGen/SPARC/CodeGen/Amode.hs
index 324eda94e75e..8d9a303f2f4e 100644
--- a/compiler/nativeGen/SPARC/CodeGen/Amode.hs
+++ b/compiler/nativeGen/SPARC/CodeGen/Amode.hs
@@ -1,13 +1,5 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module SPARC.CodeGen.Amode (
- getAmode
+ getAmode
)
where
@@ -28,11 +20,11 @@ import OrdList
-- | Generate code to reference a memory address.
-getAmode
- :: CmmExpr -- ^ expr producing an address
- -> NatM Amode
+getAmode
+ :: CmmExpr -- ^ expr producing an address
+ -> NatM Amode
-getAmode tree@(CmmRegOff _ _)
+getAmode tree@(CmmRegOff _ _)
= do dflags <- getDynFlags
getAmode (mangleIndexTree dflags tree)
@@ -50,7 +42,7 @@ getAmode (CmmMachOp (MO_Add _) [x, CmmLit (CmmInt i _)])
= do
(reg, code) <- getSomeReg x
let
- off = ImmInt (fromInteger i)
+ off = ImmInt (fromInteger i)
return (Amode (AddrRegImm reg off) code)
getAmode (CmmMachOp (MO_Add _) [x, y])
@@ -58,23 +50,23 @@ getAmode (CmmMachOp (MO_Add _) [x, y])
(regX, codeX) <- getSomeReg x
(regY, codeY) <- getSomeReg y
let
- code = codeX `appOL` codeY
+ code = codeX `appOL` codeY
return (Amode (AddrRegReg regX regY) code)
getAmode (CmmLit lit)
= do
- let imm__2 = litToImm lit
- tmp1 <- getNewRegNat II32
- tmp2 <- getNewRegNat II32
+ let imm__2 = litToImm lit
+ tmp1 <- getNewRegNat II32
+ tmp2 <- getNewRegNat II32
+
+ let code = toOL [ SETHI (HI imm__2) tmp1
+ , OR False tmp1 (RIImm (LO imm__2)) tmp2]
- let code = toOL [ SETHI (HI imm__2) tmp1
- , OR False tmp1 (RIImm (LO imm__2)) tmp2]
-
- return (Amode (AddrRegReg tmp2 g0) code)
+ return (Amode (AddrRegReg tmp2 g0) code)
getAmode other
= do
(reg, code) <- getSomeReg other
let
- off = ImmInt 0
+ off = ImmInt 0
return (Amode (AddrRegImm reg off) code)
diff --git a/compiler/nativeGen/SPARC/CodeGen/Base.hs b/compiler/nativeGen/SPARC/CodeGen/Base.hs
index 03b31e016a4d..270fd699b009 100644
--- a/compiler/nativeGen/SPARC/CodeGen/Base.hs
+++ b/compiler/nativeGen/SPARC/CodeGen/Base.hs
@@ -1,22 +1,14 @@
+module SPARC.CodeGen.Base (
+ InstrBlock,
+ CondCode(..),
+ ChildCode64(..),
+ Amode(..),
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
+ Register(..),
+ setSizeOfRegister,
-module SPARC.CodeGen.Base (
- InstrBlock,
- CondCode(..),
- ChildCode64(..),
- Amode(..),
-
- Register(..),
- setSizeOfRegister,
-
- getRegisterReg,
- mangleIndexTree
+ getRegisterReg,
+ mangleIndexTree
)
where
@@ -39,63 +31,63 @@ import OrdList
--------------------------------------------------------------------------------
-- | 'InstrBlock's are the insn sequences generated by the insn selectors.
--- They are really trees of insns to facilitate fast appending, where a
--- left-to-right traversal yields the insns in the correct order.
+-- They are really trees of insns to facilitate fast appending, where a
+-- left-to-right traversal yields the insns in the correct order.
--
-type InstrBlock
- = OrdList Instr
+type InstrBlock
+ = OrdList Instr
-- | Condition codes passed up the tree.
--
-data CondCode
- = CondCode Bool Cond InstrBlock
+data CondCode
+ = CondCode Bool Cond InstrBlock
-- | a.k.a "Register64"
--- Reg is the lower 32-bit temporary which contains the result.
--- Use getHiVRegFromLo to find the other VRegUnique.
+-- Reg is the lower 32-bit temporary which contains the result.
+-- Use getHiVRegFromLo to find the other VRegUnique.
--
--- Rules of this simplified insn selection game are therefore that
--- the returned Reg may be modified
+-- Rules of this simplified insn selection game are therefore that
+-- the returned Reg may be modified
--
-data ChildCode64
- = ChildCode64
+data ChildCode64
+ = ChildCode64
InstrBlock
- Reg
+ Reg
-- | Holds code that references a memory address.
-data Amode
- = Amode
- -- the AddrMode we can use in the instruction
- -- that does the real load\/store.
- AddrMode
+data Amode
+ = Amode
+ -- the AddrMode we can use in the instruction
+ -- that does the real load\/store.
+ AddrMode
- -- other setup code we have to run first before we can use the
- -- above AddrMode.
- InstrBlock
+ -- other setup code we have to run first before we can use the
+ -- above AddrMode.
+ InstrBlock
--------------------------------------------------------------------------------
-- | Code to produce a result into a register.
--- If the result must go in a specific register, it comes out as Fixed.
--- Otherwise, the parent can decide which register to put it in.
+-- If the result must go in a specific register, it comes out as Fixed.
+-- Otherwise, the parent can decide which register to put it in.
--
data Register
- = Fixed Size Reg InstrBlock
- | Any Size (Reg -> InstrBlock)
+ = Fixed Size Reg InstrBlock
+ | Any Size (Reg -> InstrBlock)
-- | Change the size field in a Register.
setSizeOfRegister
- :: Register -> Size -> Register
+ :: Register -> Size -> Register
setSizeOfRegister reg size
= case reg of
- Fixed _ reg code -> Fixed size reg code
- Any _ codefn -> Any size codefn
+ Fixed _ reg code -> Fixed size reg code
+ Any _ codefn -> Any size codefn
--------------------------------------------------------------------------------
@@ -103,7 +95,7 @@ setSizeOfRegister reg size
getRegisterReg :: Platform -> CmmReg -> Reg
getRegisterReg _ (CmmLocal (LocalReg u pk))
- = RegVirtual $ mkVirtualReg u (cmmTypeSize pk)
+ = RegVirtual $ mkVirtualReg u (cmmTypeSize pk)
getRegisterReg platform (CmmGlobal mid)
= case globalRegMaybe platform mid of
@@ -118,12 +110,8 @@ getRegisterReg platform (CmmGlobal mid)
mangleIndexTree :: DynFlags -> CmmExpr -> CmmExpr
mangleIndexTree dflags (CmmRegOff reg off)
- = CmmMachOp (MO_Add width) [CmmReg reg, CmmLit (CmmInt (fromIntegral off) width)]
- where width = typeWidth (cmmRegType dflags reg)
+ = CmmMachOp (MO_Add width) [CmmReg reg, CmmLit (CmmInt (fromIntegral off) width)]
+ where width = typeWidth (cmmRegType dflags reg)
mangleIndexTree _ _
- = panic "SPARC.CodeGen.Base.mangleIndexTree: no match"
-
-
-
-
+ = panic "SPARC.CodeGen.Base.mangleIndexTree: no match"
diff --git a/compiler/nativeGen/SPARC/CodeGen/CondCode.hs b/compiler/nativeGen/SPARC/CodeGen/CondCode.hs
index 375a9e1b3390..cb10830f465d 100644
--- a/compiler/nativeGen/SPARC/CodeGen/CondCode.hs
+++ b/compiler/nativeGen/SPARC/CodeGen/CondCode.hs
@@ -1,15 +1,7 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module SPARC.CodeGen.CondCode (
- getCondCode,
- condIntCode,
- condFltCode
+ getCondCode,
+ condIntCode,
+ condFltCode
)
where
@@ -32,7 +24,7 @@ import Outputable
getCondCode :: CmmExpr -> NatM CondCode
getCondCode (CmmMachOp mop [x, y])
- =
+ =
case mop of
MO_F_Eq W32 -> condFltCode EQQ x y
MO_F_Ne W32 -> condFltCode NE x y
@@ -86,8 +78,8 @@ condIntCode cond x y = do
(src1, code1) <- getSomeReg x
(src2, code2) <- getSomeReg y
let
- code__2 = code1 `appOL` code2 `snocOL`
- SUB False True src1 (RIReg src2) g0
+ code__2 = code1 `appOL` code2 `snocOL`
+ SUB False True src1 (RIReg src2) g0
return (CondCode False cond code__2)
@@ -98,19 +90,19 @@ condFltCode cond x y = do
(src2, code2) <- getSomeReg y
tmp <- getNewRegNat FF64
let
- promote x = FxTOy FF32 FF64 x tmp
-
- pk1 = cmmExprType dflags x
- pk2 = cmmExprType dflags y
-
- code__2 =
- if pk1 `cmmEqType` pk2 then
- code1 `appOL` code2 `snocOL`
- FCMP True (cmmTypeSize pk1) src1 src2
- else if typeWidth pk1 == W32 then
- code1 `snocOL` promote src1 `appOL` code2 `snocOL`
- FCMP True FF64 tmp src2
- else
- code1 `appOL` code2 `snocOL` promote src2 `snocOL`
- FCMP True FF64 src1 tmp
+ promote x = FxTOy FF32 FF64 x tmp
+
+ pk1 = cmmExprType dflags x
+ pk2 = cmmExprType dflags y
+
+ code__2 =
+ if pk1 `cmmEqType` pk2 then
+ code1 `appOL` code2 `snocOL`
+ FCMP True (cmmTypeSize pk1) src1 src2
+ else if typeWidth pk1 == W32 then
+ code1 `snocOL` promote src1 `appOL` code2 `snocOL`
+ FCMP True FF64 tmp src2
+ else
+ code1 `appOL` code2 `snocOL` promote src2 `snocOL`
+ FCMP True FF64 src1 tmp
return (CondCode True cond code__2)
diff --git a/compiler/nativeGen/SPARC/CodeGen/Expand.hs b/compiler/nativeGen/SPARC/CodeGen/Expand.hs
index 03f571c20bb7..1d4d1379a58f 100644
--- a/compiler/nativeGen/SPARC/CodeGen/Expand.hs
+++ b/compiler/nativeGen/SPARC/CodeGen/Expand.hs
@@ -1,14 +1,6 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
-- | Expand out synthetic instructions into single machine instrs.
module SPARC.CodeGen.Expand (
- expandTop
+ expandTop
)
where
@@ -17,7 +9,7 @@ import SPARC.Instr
import SPARC.Imm
import SPARC.AddrMode
import SPARC.Regs
-import SPARC.Ppr ()
+import SPARC.Ppr ()
import Instruction
import Reg
import Size
@@ -30,139 +22,132 @@ import OrdList
-- | Expand out synthetic instructions in this top level thing
expandTop :: NatCmmDecl CmmStatics Instr -> NatCmmDecl CmmStatics Instr
expandTop top@(CmmData{})
- = top
+ = top
expandTop (CmmProc info lbl live (ListGraph blocks))
- = CmmProc info lbl live (ListGraph $ map expandBlock blocks)
+ = CmmProc info lbl live (ListGraph $ map expandBlock blocks)
-- | Expand out synthetic instructions in this block
expandBlock :: NatBasicBlock Instr -> NatBasicBlock Instr
expandBlock (BasicBlock label instrs)
- = let instrs_ol = expandBlockInstrs instrs
- instrs' = fromOL instrs_ol
- in BasicBlock label instrs'
+ = let instrs_ol = expandBlockInstrs instrs
+ instrs' = fromOL instrs_ol
+ in BasicBlock label instrs'
-- | Expand out some instructions
expandBlockInstrs :: [Instr] -> OrdList Instr
-expandBlockInstrs [] = nilOL
-
+expandBlockInstrs [] = nilOL
+
expandBlockInstrs (ii:is)
- = let ii_doubleRegs = remapRegPair ii
- is_misaligned = expandMisalignedDoubles ii_doubleRegs
+ = let ii_doubleRegs = remapRegPair ii
+ is_misaligned = expandMisalignedDoubles ii_doubleRegs
+
+ in is_misaligned `appOL` expandBlockInstrs is
- in is_misaligned `appOL` expandBlockInstrs is
-
-- | In the SPARC instruction set the FP register pairs that are used
--- to hold 64 bit floats are refered to by just the first reg
--- of the pair. Remap our internal reg pairs to the appropriate reg.
+-- to hold 64 bit floats are refered to by just the first reg
+-- of the pair. Remap our internal reg pairs to the appropriate reg.
--
--- For example:
--- ldd [%l1], (%f0 | %f1)
+-- For example:
+-- ldd [%l1], (%f0 | %f1)
--
--- gets mapped to
--- ldd [$l1], %f0
+-- gets mapped to
+-- ldd [$l1], %f0
--
remapRegPair :: Instr -> Instr
remapRegPair instr
- = let patchF reg
- = case reg of
- RegReal (RealRegSingle _)
- -> reg
+ = let patchF reg
+ = case reg of
+ RegReal (RealRegSingle _)
+ -> reg
- RegReal (RealRegPair r1 r2)
+ RegReal (RealRegPair r1 r2)
- -- sanity checking
- | r1 >= 32
- , r1 <= 63
- , r1 `mod` 2 == 0
- , r2 == r1 + 1
- -> RegReal (RealRegSingle r1)
+ -- sanity checking
+ | r1 >= 32
+ , r1 <= 63
+ , r1 `mod` 2 == 0
+ , r2 == r1 + 1
+ -> RegReal (RealRegSingle r1)
- | otherwise
- -> pprPanic "SPARC.CodeGen.Expand: not remapping dodgy looking reg pair " (ppr reg)
+ | otherwise
+ -> pprPanic "SPARC.CodeGen.Expand: not remapping dodgy looking reg pair " (ppr reg)
- RegVirtual _
- -> pprPanic "SPARC.CodeGen.Expand: not remapping virtual reg " (ppr reg)
-
- in patchRegsOfInstr instr patchF
+ RegVirtual _
+ -> pprPanic "SPARC.CodeGen.Expand: not remapping virtual reg " (ppr reg)
+
+ in patchRegsOfInstr instr patchF
-- Expand out 64 bit load/stores into individual instructions to handle
--- possible double alignment problems.
+-- possible double alignment problems.
--
--- TODO: It'd be better to use a scratch reg instead of the add/sub thing.
--- We might be able to do this faster if we use the UA2007 instr set
--- instead of restricting ourselves to SPARC V9.
+-- TODO: It'd be better to use a scratch reg instead of the add/sub thing.
+-- We might be able to do this faster if we use the UA2007 instr set
+-- instead of restricting ourselves to SPARC V9.
--
expandMisalignedDoubles :: Instr -> OrdList Instr
expandMisalignedDoubles instr
- -- Translate to:
- -- add g1,g2,g1
- -- ld [g1],%fn
- -- ld [g1+4],%f(n+1)
- -- sub g1,g2,g1 -- to restore g1
- | LD FF64 (AddrRegReg r1 r2) fReg <- instr
- = toOL [ ADD False False r1 (RIReg r2) r1
- , LD FF32 (AddrRegReg r1 g0) fReg
- , LD FF32 (AddrRegImm r1 (ImmInt 4)) (fRegHi fReg)
- , SUB False False r1 (RIReg r2) r1 ]
-
- -- Translate to
- -- ld [addr],%fn
- -- ld [addr+4],%f(n+1)
- | LD FF64 addr fReg <- instr
- = let Just addr' = addrOffset addr 4
- in toOL [ LD FF32 addr fReg
- , LD FF32 addr' (fRegHi fReg) ]
-
- -- Translate to:
- -- add g1,g2,g1
- -- st %fn,[g1]
- -- st %f(n+1),[g1+4]
- -- sub g1,g2,g1 -- to restore g1
- | ST FF64 fReg (AddrRegReg r1 r2) <- instr
- = toOL [ ADD False False r1 (RIReg r2) r1
- , ST FF32 fReg (AddrRegReg r1 g0)
- , ST FF32 (fRegHi fReg) (AddrRegImm r1 (ImmInt 4))
- , SUB False False r1 (RIReg r2) r1 ]
-
- -- Translate to
- -- ld [addr],%fn
- -- ld [addr+4],%f(n+1)
- | ST FF64 fReg addr <- instr
- = let Just addr' = addrOffset addr 4
- in toOL [ ST FF32 fReg addr
- , ST FF32 (fRegHi fReg) addr' ]
-
- -- some other instr
- | otherwise
- = unitOL instr
-
-
-
--- | The the high partner for this float reg.
+ -- Translate to:
+ -- add g1,g2,g1
+ -- ld [g1],%fn
+ -- ld [g1+4],%f(n+1)
+ -- sub g1,g2,g1 -- to restore g1
+ | LD FF64 (AddrRegReg r1 r2) fReg <- instr
+ = toOL [ ADD False False r1 (RIReg r2) r1
+ , LD FF32 (AddrRegReg r1 g0) fReg
+ , LD FF32 (AddrRegImm r1 (ImmInt 4)) (fRegHi fReg)
+ , SUB False False r1 (RIReg r2) r1 ]
+
+ -- Translate to
+ -- ld [addr],%fn
+ -- ld [addr+4],%f(n+1)
+ | LD FF64 addr fReg <- instr
+ = let Just addr' = addrOffset addr 4
+ in toOL [ LD FF32 addr fReg
+ , LD FF32 addr' (fRegHi fReg) ]
+
+ -- Translate to:
+ -- add g1,g2,g1
+ -- st %fn,[g1]
+ -- st %f(n+1),[g1+4]
+ -- sub g1,g2,g1 -- to restore g1
+ | ST FF64 fReg (AddrRegReg r1 r2) <- instr
+ = toOL [ ADD False False r1 (RIReg r2) r1
+ , ST FF32 fReg (AddrRegReg r1 g0)
+ , ST FF32 (fRegHi fReg) (AddrRegImm r1 (ImmInt 4))
+ , SUB False False r1 (RIReg r2) r1 ]
+
+ -- Translate to
+ -- ld [addr],%fn
+ -- ld [addr+4],%f(n+1)
+ | ST FF64 fReg addr <- instr
+ = let Just addr' = addrOffset addr 4
+ in toOL [ ST FF32 fReg addr
+ , ST FF32 (fRegHi fReg) addr' ]
+
+ -- some other instr
+ | otherwise
+ = unitOL instr
+
+
+
+-- | The the high partner for this float reg.
fRegHi :: Reg -> Reg
fRegHi (RegReal (RealRegSingle r1))
- | r1 >= 32
- , r1 <= 63
- , r1 `mod` 2 == 0
- = (RegReal $ RealRegSingle (r1 + 1))
-
+ | r1 >= 32
+ , r1 <= 63
+ , r1 `mod` 2 == 0
+ = (RegReal $ RealRegSingle (r1 + 1))
+
-- Can't take high partner for non-low reg.
fRegHi reg
- = pprPanic "SPARC.CodeGen.Expand: can't take fRegHi from " (ppr reg)
-
-
-
-
-
-
-
+ = pprPanic "SPARC.CodeGen.Expand: can't take fRegHi from " (ppr reg)
diff --git a/compiler/nativeGen/SPARC/CodeGen/Gen32.hs b/compiler/nativeGen/SPARC/CodeGen/Gen32.hs
index 43b792a840dc..90fb41870db4 100644
--- a/compiler/nativeGen/SPARC/CodeGen/Gen32.hs
+++ b/compiler/nativeGen/SPARC/CodeGen/Gen32.hs
@@ -1,15 +1,7 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
-- | Evaluation of 32 bit values.
module SPARC.CodeGen.Gen32 (
- getSomeReg,
- getRegister
+ getSomeReg,
+ getRegister
)
where
@@ -37,16 +29,16 @@ import OrdList
import Outputable
-- | The dual to getAnyReg: compute an expression into a register, but
--- we don't mind which one it is.
+-- we don't mind which one it is.
getSomeReg :: CmmExpr -> NatM (Reg, InstrBlock)
getSomeReg expr = do
r <- getRegister expr
case r of
Any rep code -> do
- tmp <- getNewRegNat rep
- return (tmp, code tmp)
- Fixed _ reg code ->
- return (reg, code)
+ tmp <- getNewRegNat rep
+ return (tmp, code tmp)
+ Fixed _ reg code ->
+ return (reg, code)
@@ -54,13 +46,13 @@ getSomeReg expr = do
--
getRegister :: CmmExpr -> NatM Register
-getRegister (CmmReg reg)
+getRegister (CmmReg reg)
= do dflags <- getDynFlags
let platform = targetPlatform dflags
return (Fixed (cmmTypeSize (cmmRegType dflags reg))
(getRegisterReg platform reg) nilOL)
-getRegister tree@(CmmRegOff _ _)
+getRegister tree@(CmmRegOff _ _)
= do dflags <- getDynFlags
getRegister (mangleIndexTree dflags tree)
@@ -80,12 +72,12 @@ getRegister (CmmMachOp (MO_UU_Conv W64 W32) [x]) = do
getRegister (CmmMachOp (MO_SS_Conv W64 W32) [x]) = do
ChildCode64 code rlo <- iselExpr64 x
- return $ Fixed II32 rlo code
+ return $ Fixed II32 rlo code
-- Load a literal float into a float register.
--- The actual literal is stored in a new data area, and we load it
--- at runtime.
+-- The actual literal is stored in a new data area, and we load it
+-- at runtime.
getRegister (CmmLit (CmmFloat f W32)) = do
-- a label for the new data area
@@ -93,13 +85,13 @@ getRegister (CmmLit (CmmFloat f W32)) = do
tmp <- getNewRegNat II32
let code dst = toOL [
- -- the data area
- LDATA ReadOnlyData $ Statics lbl
- [CmmStaticLit (CmmFloat f W32)],
+ -- the data area
+ LDATA ReadOnlyData $ Statics lbl
+ [CmmStaticLit (CmmFloat f W32)],
-- load the literal
- SETHI (HI (ImmCLbl lbl)) tmp,
- LD II32 (AddrRegImm tmp (LO (ImmCLbl lbl))) dst]
+ SETHI (HI (ImmCLbl lbl)) tmp,
+ LD II32 (AddrRegImm tmp (LO (ImmCLbl lbl))) dst]
return (Any FF32 code)
@@ -107,342 +99,342 @@ getRegister (CmmLit (CmmFloat d W64)) = do
lbl <- getNewLabelNat
tmp <- getNewRegNat II32
let code dst = toOL [
- LDATA ReadOnlyData $ Statics lbl
- [CmmStaticLit (CmmFloat d W64)],
- SETHI (HI (ImmCLbl lbl)) tmp,
- LD II64 (AddrRegImm tmp (LO (ImmCLbl lbl))) dst]
+ LDATA ReadOnlyData $ Statics lbl
+ [CmmStaticLit (CmmFloat d W64)],
+ SETHI (HI (ImmCLbl lbl)) tmp,
+ LD II64 (AddrRegImm tmp (LO (ImmCLbl lbl))) dst]
return (Any FF64 code)
-- Unary machine ops
getRegister (CmmMachOp mop [x])
= case mop of
- -- Floating point negation -------------------------
- MO_F_Neg W32 -> trivialUFCode FF32 (FNEG FF32) x
- MO_F_Neg W64 -> trivialUFCode FF64 (FNEG FF64) x
+ -- Floating point negation -------------------------
+ MO_F_Neg W32 -> trivialUFCode FF32 (FNEG FF32) x
+ MO_F_Neg W64 -> trivialUFCode FF64 (FNEG FF64) x
- -- Integer negation --------------------------------
- MO_S_Neg rep -> trivialUCode (intSize rep) (SUB False False g0) x
- MO_Not rep -> trivialUCode (intSize rep) (XNOR False g0) x
+ -- Integer negation --------------------------------
+ MO_S_Neg rep -> trivialUCode (intSize rep) (SUB False False g0) x
+ MO_Not rep -> trivialUCode (intSize rep) (XNOR False g0) x
- -- Float word size conversion ----------------------
- MO_FF_Conv W64 W32 -> coerceDbl2Flt x
- MO_FF_Conv W32 W64 -> coerceFlt2Dbl x
+ -- Float word size conversion ----------------------
+ MO_FF_Conv W64 W32 -> coerceDbl2Flt x
+ MO_FF_Conv W32 W64 -> coerceFlt2Dbl x
- -- Float <-> Signed Int conversion -----------------
- MO_FS_Conv from to -> coerceFP2Int from to x
- MO_SF_Conv from to -> coerceInt2FP from to x
+ -- Float <-> Signed Int conversion -----------------
+ MO_FS_Conv from to -> coerceFP2Int from to x
+ MO_SF_Conv from to -> coerceInt2FP from to x
- -- Unsigned integer word size conversions ----------
+ -- Unsigned integer word size conversions ----------
- -- If it's the same size, then nothing needs to be done.
- MO_UU_Conv from to
- | from == to -> conversionNop (intSize to) x
+ -- If it's the same size, then nothing needs to be done.
+ MO_UU_Conv from to
+ | from == to -> conversionNop (intSize to) x
- -- To narrow an unsigned word, mask out the high bits to simulate what would
- -- happen if we copied the value into a smaller register.
- MO_UU_Conv W16 W8 -> trivialCode W8 (AND False) x (CmmLit (CmmInt 255 W8))
- MO_UU_Conv W32 W8 -> trivialCode W8 (AND False) x (CmmLit (CmmInt 255 W8))
+ -- To narrow an unsigned word, mask out the high bits to simulate what would
+ -- happen if we copied the value into a smaller register.
+ MO_UU_Conv W16 W8 -> trivialCode W8 (AND False) x (CmmLit (CmmInt 255 W8))
+ MO_UU_Conv W32 W8 -> trivialCode W8 (AND False) x (CmmLit (CmmInt 255 W8))
- -- for narrowing 32 bit to 16 bit, don't use a literal mask value like the W16->W8
- -- case because the only way we can load it is via SETHI, which needs 2 ops.
- -- Do some shifts to chop out the high bits instead.
- MO_UU_Conv W32 W16
- -> do tmpReg <- getNewRegNat II32
- (xReg, xCode) <- getSomeReg x
- let code dst
- = xCode
- `appOL` toOL
- [ SLL xReg (RIImm $ ImmInt 16) tmpReg
- , SRL tmpReg (RIImm $ ImmInt 16) dst]
-
- return $ Any II32 code
-
- -- trivialCode W16 (AND False) x (CmmLit (CmmInt 65535 W16))
+ -- for narrowing 32 bit to 16 bit, don't use a literal mask value like the W16->W8
+ -- case because the only way we can load it is via SETHI, which needs 2 ops.
+ -- Do some shifts to chop out the high bits instead.
+ MO_UU_Conv W32 W16
+ -> do tmpReg <- getNewRegNat II32
+ (xReg, xCode) <- getSomeReg x
+ let code dst
+ = xCode
+ `appOL` toOL
+ [ SLL xReg (RIImm $ ImmInt 16) tmpReg
+ , SRL tmpReg (RIImm $ ImmInt 16) dst]
- -- To widen an unsigned word we don't have to do anything.
- -- Just leave it in the same register and mark the result as the new size.
- MO_UU_Conv W8 W16 -> conversionNop (intSize W16) x
- MO_UU_Conv W8 W32 -> conversionNop (intSize W32) x
- MO_UU_Conv W16 W32 -> conversionNop (intSize W32) x
+ return $ Any II32 code
+ -- trivialCode W16 (AND False) x (CmmLit (CmmInt 65535 W16))
- -- Signed integer word size conversions ------------
+ -- To widen an unsigned word we don't have to do anything.
+ -- Just leave it in the same register and mark the result as the new size.
+ MO_UU_Conv W8 W16 -> conversionNop (intSize W16) x
+ MO_UU_Conv W8 W32 -> conversionNop (intSize W32) x
+ MO_UU_Conv W16 W32 -> conversionNop (intSize W32) x
- -- Mask out high bits when narrowing them
- MO_SS_Conv W16 W8 -> trivialCode W8 (AND False) x (CmmLit (CmmInt 255 W8))
- MO_SS_Conv W32 W8 -> trivialCode W8 (AND False) x (CmmLit (CmmInt 255 W8))
- MO_SS_Conv W32 W16 -> trivialCode W16 (AND False) x (CmmLit (CmmInt 65535 W16))
- -- Sign extend signed words when widening them.
- MO_SS_Conv W8 W16 -> integerExtend W8 W16 x
- MO_SS_Conv W8 W32 -> integerExtend W8 W32 x
- MO_SS_Conv W16 W32 -> integerExtend W16 W32 x
+ -- Signed integer word size conversions ------------
- _ -> panic ("Unknown unary mach op: " ++ show mop)
+ -- Mask out high bits when narrowing them
+ MO_SS_Conv W16 W8 -> trivialCode W8 (AND False) x (CmmLit (CmmInt 255 W8))
+ MO_SS_Conv W32 W8 -> trivialCode W8 (AND False) x (CmmLit (CmmInt 255 W8))
+ MO_SS_Conv W32 W16 -> trivialCode W16 (AND False) x (CmmLit (CmmInt 65535 W16))
+
+ -- Sign extend signed words when widening them.
+ MO_SS_Conv W8 W16 -> integerExtend W8 W16 x
+ MO_SS_Conv W8 W32 -> integerExtend W8 W32 x
+ MO_SS_Conv W16 W32 -> integerExtend W16 W32 x
+
+ _ -> panic ("Unknown unary mach op: " ++ show mop)
-- Binary machine ops
-getRegister (CmmMachOp mop [x, y])
+getRegister (CmmMachOp mop [x, y])
= case mop of
- MO_Eq _ -> condIntReg EQQ x y
- MO_Ne _ -> condIntReg NE x y
-
- MO_S_Gt _ -> condIntReg GTT x y
- MO_S_Ge _ -> condIntReg GE x y
- MO_S_Lt _ -> condIntReg LTT x y
- MO_S_Le _ -> condIntReg LE x y
-
- MO_U_Gt W32 -> condIntReg GU x y
- MO_U_Ge W32 -> condIntReg GEU x y
- MO_U_Lt W32 -> condIntReg LU x y
- MO_U_Le W32 -> condIntReg LEU x y
-
- MO_U_Gt W16 -> condIntReg GU x y
- MO_U_Ge W16 -> condIntReg GEU x y
- MO_U_Lt W16 -> condIntReg LU x y
- MO_U_Le W16 -> condIntReg LEU x y
-
- MO_Add W32 -> trivialCode W32 (ADD False False) x y
- MO_Sub W32 -> trivialCode W32 (SUB False False) x y
+ MO_Eq _ -> condIntReg EQQ x y
+ MO_Ne _ -> condIntReg NE x y
+
+ MO_S_Gt _ -> condIntReg GTT x y
+ MO_S_Ge _ -> condIntReg GE x y
+ MO_S_Lt _ -> condIntReg LTT x y
+ MO_S_Le _ -> condIntReg LE x y
+
+ MO_U_Gt W32 -> condIntReg GU x y
+ MO_U_Ge W32 -> condIntReg GEU x y
+ MO_U_Lt W32 -> condIntReg LU x y
+ MO_U_Le W32 -> condIntReg LEU x y
+
+ MO_U_Gt W16 -> condIntReg GU x y
+ MO_U_Ge W16 -> condIntReg GEU x y
+ MO_U_Lt W16 -> condIntReg LU x y
+ MO_U_Le W16 -> condIntReg LEU x y
+
+ MO_Add W32 -> trivialCode W32 (ADD False False) x y
+ MO_Sub W32 -> trivialCode W32 (SUB False False) x y
MO_S_MulMayOflo rep -> imulMayOflo rep x y
- MO_S_Quot W32 -> idiv True False x y
- MO_U_Quot W32 -> idiv False False x y
-
- MO_S_Rem W32 -> irem True x y
- MO_U_Rem W32 -> irem False x y
-
- MO_F_Eq _ -> condFltReg EQQ x y
- MO_F_Ne _ -> condFltReg NE x y
+ MO_S_Quot W32 -> idiv True False x y
+ MO_U_Quot W32 -> idiv False False x y
+
+ MO_S_Rem W32 -> irem True x y
+ MO_U_Rem W32 -> irem False x y
+
+ MO_F_Eq _ -> condFltReg EQQ x y
+ MO_F_Ne _ -> condFltReg NE x y
- MO_F_Gt _ -> condFltReg GTT x y
- MO_F_Ge _ -> condFltReg GE x y
- MO_F_Lt _ -> condFltReg LTT x y
- MO_F_Le _ -> condFltReg LE x y
+ MO_F_Gt _ -> condFltReg GTT x y
+ MO_F_Ge _ -> condFltReg GE x y
+ MO_F_Lt _ -> condFltReg LTT x y
+ MO_F_Le _ -> condFltReg LE x y
- MO_F_Add w -> trivialFCode w FADD x y
- MO_F_Sub w -> trivialFCode w FSUB x y
- MO_F_Mul w -> trivialFCode w FMUL x y
- MO_F_Quot w -> trivialFCode w FDIV x y
+ MO_F_Add w -> trivialFCode w FADD x y
+ MO_F_Sub w -> trivialFCode w FSUB x y
+ MO_F_Mul w -> trivialFCode w FMUL x y
+ MO_F_Quot w -> trivialFCode w FDIV x y
- MO_And rep -> trivialCode rep (AND False) x y
- MO_Or rep -> trivialCode rep (OR False) x y
- MO_Xor rep -> trivialCode rep (XOR False) x y
+ MO_And rep -> trivialCode rep (AND False) x y
+ MO_Or rep -> trivialCode rep (OR False) x y
+ MO_Xor rep -> trivialCode rep (XOR False) x y
- MO_Mul rep -> trivialCode rep (SMUL False) x y
+ MO_Mul rep -> trivialCode rep (SMUL False) x y
- MO_Shl rep -> trivialCode rep SLL x y
- MO_U_Shr rep -> trivialCode rep SRL x y
- MO_S_Shr rep -> trivialCode rep SRA x y
+ MO_Shl rep -> trivialCode rep SLL x y
+ MO_U_Shr rep -> trivialCode rep SRL x y
+ MO_S_Shr rep -> trivialCode rep SRA x y
- _ -> pprPanic "getRegister(sparc) - binary CmmMachOp (1)" (pprMachOp mop)
+ _ -> pprPanic "getRegister(sparc) - binary CmmMachOp (1)" (pprMachOp mop)
where
getRegister (CmmLoad mem pk) = do
Amode src code <- getAmode mem
let
- code__2 dst = code `snocOL` LD (cmmTypeSize pk) src dst
+ code__2 dst = code `snocOL` LD (cmmTypeSize pk) src dst
return (Any (cmmTypeSize pk) code__2)
getRegister (CmmLit (CmmInt i _))
| fits13Bits i
= let
- src = ImmInt (fromInteger i)
- code dst = unitOL (OR False g0 (RIImm src) dst)
+ src = ImmInt (fromInteger i)
+ code dst = unitOL (OR False g0 (RIImm src) dst)
in
- return (Any II32 code)
+ return (Any II32 code)
getRegister (CmmLit lit)
= let imm = litToImm lit
- code dst = toOL [
- SETHI (HI imm) dst,
- OR False dst (RIImm (LO imm)) dst]
+ code dst = toOL [
+ SETHI (HI imm) dst,
+ OR False dst (RIImm (LO imm)) dst]
in return (Any II32 code)
getRegister _
- = panic "SPARC.CodeGen.Gen32.getRegister: no match"
+ = panic "SPARC.CodeGen.Gen32.getRegister: no match"
-- | sign extend and widen
-integerExtend
- :: Width -- ^ width of source expression
- -> Width -- ^ width of result
- -> CmmExpr -- ^ source expression
- -> NatM Register
+integerExtend
+ :: Width -- ^ width of source expression
+ -> Width -- ^ width of result
+ -> CmmExpr -- ^ source expression
+ -> NatM Register
integerExtend from to expr
- = do -- load the expr into some register
- (reg, e_code) <- getSomeReg expr
- tmp <- getNewRegNat II32
- let bitCount
- = case (from, to) of
- (W8, W32) -> 24
- (W16, W32) -> 16
- (W8, W16) -> 24
- _ -> panic "SPARC.CodeGen.Gen32: no match"
- let code dst
- = e_code
-
- -- local shift word left to load the sign bit
- `snocOL` SLL reg (RIImm (ImmInt bitCount)) tmp
-
- -- arithmetic shift right to sign extend
- `snocOL` SRA tmp (RIImm (ImmInt bitCount)) dst
-
- return (Any (intSize to) code)
-
+ = do -- load the expr into some register
+ (reg, e_code) <- getSomeReg expr
+ tmp <- getNewRegNat II32
+ let bitCount
+ = case (from, to) of
+ (W8, W32) -> 24
+ (W16, W32) -> 16
+ (W8, W16) -> 24
+ _ -> panic "SPARC.CodeGen.Gen32: no match"
+ let code dst
+ = e_code
+
+ -- local shift word left to load the sign bit
+ `snocOL` SLL reg (RIImm (ImmInt bitCount)) tmp
+
+ -- arithmetic shift right to sign extend
+ `snocOL` SRA tmp (RIImm (ImmInt bitCount)) dst
+
+ return (Any (intSize to) code)
+
-- | For nop word format conversions we set the resulting value to have the
--- required size, but don't need to generate any actual code.
+-- required size, but don't need to generate any actual code.
--
conversionNop
- :: Size -> CmmExpr -> NatM Register
+ :: Size -> CmmExpr -> NatM Register
conversionNop new_rep expr
- = do e_code <- getRegister expr
- return (setSizeOfRegister e_code new_rep)
+ = do e_code <- getRegister expr
+ return (setSizeOfRegister e_code new_rep)
-- | Generate an integer division instruction.
idiv :: Bool -> Bool -> CmmExpr -> CmmExpr -> NatM Register
-
--- For unsigned division with a 32 bit numerator,
--- we can just clear the Y register.
-idiv False cc x y
+
+-- For unsigned division with a 32 bit numerator,
+-- we can just clear the Y register.
+idiv False cc x y
= do
- (a_reg, a_code) <- getSomeReg x
- (b_reg, b_code) <- getSomeReg y
-
- let code dst
- = a_code
- `appOL` b_code
- `appOL` toOL
- [ WRY g0 g0
- , UDIV cc a_reg (RIReg b_reg) dst]
-
- return (Any II32 code)
-
+ (a_reg, a_code) <- getSomeReg x
+ (b_reg, b_code) <- getSomeReg y
+
+ let code dst
+ = a_code
+ `appOL` b_code
+ `appOL` toOL
+ [ WRY g0 g0
+ , UDIV cc a_reg (RIReg b_reg) dst]
+
+ return (Any II32 code)
+
-- For _signed_ division with a 32 bit numerator,
--- we have to sign extend the numerator into the Y register.
-idiv True cc x y
+-- we have to sign extend the numerator into the Y register.
+idiv True cc x y
= do
- (a_reg, a_code) <- getSomeReg x
- (b_reg, b_code) <- getSomeReg y
-
- tmp <- getNewRegNat II32
-
- let code dst
- = a_code
- `appOL` b_code
- `appOL` toOL
- [ SRA a_reg (RIImm (ImmInt 16)) tmp -- sign extend
- , SRA tmp (RIImm (ImmInt 16)) tmp
-
- , WRY tmp g0
- , SDIV cc a_reg (RIReg b_reg) dst]
-
- return (Any II32 code)
+ (a_reg, a_code) <- getSomeReg x
+ (b_reg, b_code) <- getSomeReg y
+
+ tmp <- getNewRegNat II32
+
+ let code dst
+ = a_code
+ `appOL` b_code
+ `appOL` toOL
+ [ SRA a_reg (RIImm (ImmInt 16)) tmp -- sign extend
+ , SRA tmp (RIImm (ImmInt 16)) tmp
+
+ , WRY tmp g0
+ , SDIV cc a_reg (RIReg b_reg) dst]
+
+ return (Any II32 code)
-- | Do an integer remainder.
--
--- NOTE: The SPARC v8 architecture manual says that integer division
--- instructions _may_ generate a remainder, depending on the implementation.
--- If so it is _recommended_ that the remainder is placed in the Y register.
+-- NOTE: The SPARC v8 architecture manual says that integer division
+-- instructions _may_ generate a remainder, depending on the implementation.
+-- If so it is _recommended_ that the remainder is placed in the Y register.
--
-- The UltraSparc 2007 manual says Y is _undefined_ after division.
--
--- The SPARC T2 doesn't store the remainder, not sure about the others.
--- It's probably best not to worry about it, and just generate our own
--- remainders.
+-- The SPARC T2 doesn't store the remainder, not sure about the others.
+-- It's probably best not to worry about it, and just generate our own
+-- remainders.
--
irem :: Bool -> CmmExpr -> CmmExpr -> NatM Register
--- For unsigned operands:
--- Division is between a 64 bit numerator and a 32 bit denominator,
--- so we still have to clear the Y register.
-irem False x y
+-- For unsigned operands:
+-- Division is between a 64 bit numerator and a 32 bit denominator,
+-- so we still have to clear the Y register.
+irem False x y
= do
- (a_reg, a_code) <- getSomeReg x
- (b_reg, b_code) <- getSomeReg y
+ (a_reg, a_code) <- getSomeReg x
+ (b_reg, b_code) <- getSomeReg y
+
+ tmp_reg <- getNewRegNat II32
- tmp_reg <- getNewRegNat II32
+ let code dst
+ = a_code
+ `appOL` b_code
+ `appOL` toOL
+ [ WRY g0 g0
+ , UDIV False a_reg (RIReg b_reg) tmp_reg
+ , UMUL False tmp_reg (RIReg b_reg) tmp_reg
+ , SUB False False a_reg (RIReg tmp_reg) dst]
+
+ return (Any II32 code)
- let code dst
- = a_code
- `appOL` b_code
- `appOL` toOL
- [ WRY g0 g0
- , UDIV False a_reg (RIReg b_reg) tmp_reg
- , UMUL False tmp_reg (RIReg b_reg) tmp_reg
- , SUB False False a_reg (RIReg tmp_reg) dst]
-
- return (Any II32 code)
-
-- For signed operands:
--- Make sure to sign extend into the Y register, or the remainder
--- will have the wrong sign when the numerator is negative.
+-- Make sure to sign extend into the Y register, or the remainder
+-- will have the wrong sign when the numerator is negative.
--
--- TODO: When sign extending, GCC only shifts the a_reg right by 17 bits,
--- not the full 32. Not sure why this is, something to do with overflow?
--- If anyone cares enough about the speed of signed remainder they
--- can work it out themselves (then tell me). -- BL 2009/01/20
-irem True x y
+-- TODO: When sign extending, GCC only shifts the a_reg right by 17 bits,
+-- not the full 32. Not sure why this is, something to do with overflow?
+-- If anyone cares enough about the speed of signed remainder they
+-- can work it out themselves (then tell me). -- BL 2009/01/20
+irem True x y
= do
- (a_reg, a_code) <- getSomeReg x
- (b_reg, b_code) <- getSomeReg y
-
- tmp1_reg <- getNewRegNat II32
- tmp2_reg <- getNewRegNat II32
-
- let code dst
- = a_code
- `appOL` b_code
- `appOL` toOL
- [ SRA a_reg (RIImm (ImmInt 16)) tmp1_reg -- sign extend
- , SRA tmp1_reg (RIImm (ImmInt 16)) tmp1_reg -- sign extend
- , WRY tmp1_reg g0
-
- , SDIV False a_reg (RIReg b_reg) tmp2_reg
- , SMUL False tmp2_reg (RIReg b_reg) tmp2_reg
- , SUB False False a_reg (RIReg tmp2_reg) dst]
-
- return (Any II32 code)
-
+ (a_reg, a_code) <- getSomeReg x
+ (b_reg, b_code) <- getSomeReg y
+
+ tmp1_reg <- getNewRegNat II32
+ tmp2_reg <- getNewRegNat II32
+
+ let code dst
+ = a_code
+ `appOL` b_code
+ `appOL` toOL
+ [ SRA a_reg (RIImm (ImmInt 16)) tmp1_reg -- sign extend
+ , SRA tmp1_reg (RIImm (ImmInt 16)) tmp1_reg -- sign extend
+ , WRY tmp1_reg g0
+
+ , SDIV False a_reg (RIReg b_reg) tmp2_reg
+ , SMUL False tmp2_reg (RIReg b_reg) tmp2_reg
+ , SUB False False a_reg (RIReg tmp2_reg) dst]
+
+ return (Any II32 code)
+
imulMayOflo :: Width -> CmmExpr -> CmmExpr -> NatM Register
-imulMayOflo rep a b
+imulMayOflo rep a b
= do
- (a_reg, a_code) <- getSomeReg a
- (b_reg, b_code) <- getSomeReg b
- res_lo <- getNewRegNat II32
- res_hi <- getNewRegNat II32
-
- let shift_amt = case rep of
- W32 -> 31
- W64 -> 63
- _ -> panic "shift_amt"
-
- let code dst = a_code `appOL` b_code `appOL`
+ (a_reg, a_code) <- getSomeReg a
+ (b_reg, b_code) <- getSomeReg b
+ res_lo <- getNewRegNat II32
+ res_hi <- getNewRegNat II32
+
+ let shift_amt = case rep of
+ W32 -> 31
+ W64 -> 63
+ _ -> panic "shift_amt"
+
+ let code dst = a_code `appOL` b_code `appOL`
toOL [
SMUL False a_reg (RIReg b_reg) res_lo,
RDY res_hi,
SRA res_lo (RIImm (ImmInt shift_amt)) res_lo,
SUB False False res_lo (RIReg res_hi) dst
]
- return (Any II32 code)
+ return (Any II32 code)
-- -----------------------------------------------------------------------------
@@ -458,19 +450,19 @@ imulMayOflo rep a b
-- have handled the constant-folding.
trivialCode
- :: Width
- -> (Reg -> RI -> Reg -> Instr)
- -> CmmExpr
- -> CmmExpr
- -> NatM Register
-
+ :: Width
+ -> (Reg -> RI -> Reg -> Instr)
+ -> CmmExpr
+ -> CmmExpr
+ -> NatM Register
+
trivialCode _ instr x (CmmLit (CmmInt y _))
| fits13Bits y
= do
(src1, code) <- getSomeReg x
let
- src2 = ImmInt (fromInteger y)
- code__2 dst = code `snocOL` instr src1 (RIImm src2) dst
+ src2 = ImmInt (fromInteger y)
+ code__2 dst = code `snocOL` instr src1 (RIImm src2) dst
return (Any II32 code__2)
@@ -478,17 +470,17 @@ trivialCode _ instr x y = do
(src1, code1) <- getSomeReg x
(src2, code2) <- getSomeReg y
let
- code__2 dst = code1 `appOL` code2 `snocOL`
- instr src1 (RIReg src2) dst
+ code__2 dst = code1 `appOL` code2 `snocOL`
+ instr src1 (RIReg src2) dst
return (Any II32 code__2)
-trivialFCode
- :: Width
- -> (Size -> Reg -> Reg -> Reg -> Instr)
- -> CmmExpr
- -> CmmExpr
- -> NatM Register
+trivialFCode
+ :: Width
+ -> (Size -> Reg -> Reg -> Reg -> Instr)
+ -> CmmExpr
+ -> CmmExpr
+ -> NatM Register
trivialFCode pk instr x y = do
dflags <- getDynFlags
@@ -496,49 +488,49 @@ trivialFCode pk instr x y = do
(src2, code2) <- getSomeReg y
tmp <- getNewRegNat FF64
let
- promote x = FxTOy FF32 FF64 x tmp
+ promote x = FxTOy FF32 FF64 x tmp
- pk1 = cmmExprType dflags x
- pk2 = cmmExprType dflags y
+ pk1 = cmmExprType dflags x
+ pk2 = cmmExprType dflags y
- code__2 dst =
- if pk1 `cmmEqType` pk2 then
- code1 `appOL` code2 `snocOL`
- instr (floatSize pk) src1 src2 dst
- else if typeWidth pk1 == W32 then
- code1 `snocOL` promote src1 `appOL` code2 `snocOL`
- instr FF64 tmp src2 dst
- else
- code1 `appOL` code2 `snocOL` promote src2 `snocOL`
- instr FF64 src1 tmp dst
- return (Any (cmmTypeSize $ if pk1 `cmmEqType` pk2 then pk1 else cmmFloat W64)
- code__2)
+ code__2 dst =
+ if pk1 `cmmEqType` pk2 then
+ code1 `appOL` code2 `snocOL`
+ instr (floatSize pk) src1 src2 dst
+ else if typeWidth pk1 == W32 then
+ code1 `snocOL` promote src1 `appOL` code2 `snocOL`
+ instr FF64 tmp src2 dst
+ else
+ code1 `appOL` code2 `snocOL` promote src2 `snocOL`
+ instr FF64 src1 tmp dst
+ return (Any (cmmTypeSize $ if pk1 `cmmEqType` pk2 then pk1 else cmmFloat W64)
+ code__2)
trivialUCode
- :: Size
- -> (RI -> Reg -> Instr)
- -> CmmExpr
- -> NatM Register
-
+ :: Size
+ -> (RI -> Reg -> Instr)
+ -> CmmExpr
+ -> NatM Register
+
trivialUCode size instr x = do
(src, code) <- getSomeReg x
let
- code__2 dst = code `snocOL` instr (RIReg src) dst
+ code__2 dst = code `snocOL` instr (RIReg src) dst
return (Any size code__2)
-trivialUFCode
- :: Size
- -> (Reg -> Reg -> Instr)
- -> CmmExpr
- -> NatM Register
-
+trivialUFCode
+ :: Size
+ -> (Reg -> Reg -> Instr)
+ -> CmmExpr
+ -> NatM Register
+
trivialUFCode pk instr x = do
(src, code) <- getSomeReg x
let
- code__2 dst = code `snocOL` instr src dst
+ code__2 dst = code `snocOL` instr src dst
return (Any pk code__2)
@@ -551,10 +543,10 @@ coerceInt2FP :: Width -> Width -> CmmExpr -> NatM Register
coerceInt2FP width1 width2 x = do
(src, code) <- getSomeReg x
let
- code__2 dst = code `appOL` toOL [
- ST (intSize width1) src (spRel (-2)),
- LD (intSize width1) (spRel (-2)) dst,
- FxTOy (intSize width1) (floatSize width2) dst dst]
+ code__2 dst = code `appOL` toOL [
+ ST (intSize width1) src (spRel (-2)),
+ LD (intSize width1) (spRel (-2)) dst,
+ FxTOy (intSize width1) (floatSize width2) dst dst]
return (Any (floatSize $ width2) code__2)
@@ -562,37 +554,37 @@ coerceInt2FP width1 width2 x = do
-- | Coerce a floating point value to integer
--
-- NOTE: On sparc v9 there are no instructions to move a value from an
--- FP register directly to an int register, so we have to use a load/store.
+-- FP register directly to an int register, so we have to use a load/store.
--
coerceFP2Int :: Width -> Width -> CmmExpr -> NatM Register
-coerceFP2Int width1 width2 x
- = do let fsize1 = floatSize width1
- fsize2 = floatSize width2
-
- isize2 = intSize width2
+coerceFP2Int width1 width2 x
+ = do let fsize1 = floatSize width1
+ fsize2 = floatSize width2
+
+ isize2 = intSize width2
+
+ (fsrc, code) <- getSomeReg x
+ fdst <- getNewRegNat fsize2
- (fsrc, code) <- getSomeReg x
- fdst <- getNewRegNat fsize2
-
- let code2 dst
- = code
- `appOL` toOL
- -- convert float to int format, leaving it in a float reg.
- [ FxTOy fsize1 isize2 fsrc fdst
+ let code2 dst
+ = code
+ `appOL` toOL
+ -- convert float to int format, leaving it in a float reg.
+ [ FxTOy fsize1 isize2 fsrc fdst
- -- store the int into mem, then load it back to move
- -- it into an actual int reg.
- , ST fsize2 fdst (spRel (-2))
- , LD isize2 (spRel (-2)) dst]
+ -- store the int into mem, then load it back to move
+ -- it into an actual int reg.
+ , ST fsize2 fdst (spRel (-2))
+ , LD isize2 (spRel (-2)) dst]
- return (Any isize2 code2)
+ return (Any isize2 code2)
-- | Coerce a double precision floating point value to single precision.
coerceDbl2Flt :: CmmExpr -> NatM Register
coerceDbl2Flt x = do
(src, code) <- getSomeReg x
- return (Any FF32 (\dst -> code `snocOL` FxTOy FF64 FF32 src dst))
+ return (Any FF32 (\dst -> code `snocOL` FxTOy FF64 FF32 src dst))
-- | Coerce a single precision floating point value to double precision
@@ -606,45 +598,45 @@ coerceFlt2Dbl x = do
-- Condition Codes -------------------------------------------------------------
--
--- Evaluate a comparision, and get the result into a register.
---
+-- Evaluate a comparison, and get the result into a register.
+--
-- Do not fill the delay slots here. you will confuse the register allocator.
--
condIntReg :: Cond -> CmmExpr -> CmmExpr -> NatM Register
condIntReg EQQ x (CmmLit (CmmInt 0 _)) = do
(src, code) <- getSomeReg x
let
- code__2 dst = code `appOL` toOL [
- SUB False True g0 (RIReg src) g0,
- SUB True False g0 (RIImm (ImmInt (-1))) dst]
+ code__2 dst = code `appOL` toOL [
+ SUB False True g0 (RIReg src) g0,
+ SUB True False g0 (RIImm (ImmInt (-1))) dst]
return (Any II32 code__2)
condIntReg EQQ x y = do
(src1, code1) <- getSomeReg x
(src2, code2) <- getSomeReg y
let
- code__2 dst = code1 `appOL` code2 `appOL` toOL [
- XOR False src1 (RIReg src2) dst,
- SUB False True g0 (RIReg dst) g0,
- SUB True False g0 (RIImm (ImmInt (-1))) dst]
+ code__2 dst = code1 `appOL` code2 `appOL` toOL [
+ XOR False src1 (RIReg src2) dst,
+ SUB False True g0 (RIReg dst) g0,
+ SUB True False g0 (RIImm (ImmInt (-1))) dst]
return (Any II32 code__2)
condIntReg NE x (CmmLit (CmmInt 0 _)) = do
(src, code) <- getSomeReg x
let
- code__2 dst = code `appOL` toOL [
- SUB False True g0 (RIReg src) g0,
- ADD True False g0 (RIImm (ImmInt 0)) dst]
+ code__2 dst = code `appOL` toOL [
+ SUB False True g0 (RIReg src) g0,
+ ADD True False g0 (RIImm (ImmInt 0)) dst]
return (Any II32 code__2)
condIntReg NE x y = do
(src1, code1) <- getSomeReg x
(src2, code2) <- getSomeReg y
let
- code__2 dst = code1 `appOL` code2 `appOL` toOL [
- XOR False src1 (RIReg src2) dst,
- SUB False True g0 (RIReg dst) g0,
- ADD True False g0 (RIImm (ImmInt 0)) dst]
+ code__2 dst = code1 `appOL` code2 `appOL` toOL [
+ XOR False src1 (RIReg src2) dst,
+ SUB False True g0 (RIReg dst) g0,
+ ADD True False g0 (RIImm (ImmInt 0)) dst]
return (Any II32 code__2)
condIntReg cond x y = do
@@ -652,22 +644,22 @@ condIntReg cond x y = do
bid2 <- liftM (\a -> seq a a) getBlockIdNat
CondCode _ cond cond_code <- condIntCode cond x y
let
- code__2 dst
- = cond_code
- `appOL` toOL
- [ BI cond False bid1
- , NOP
+ code__2 dst
+ = cond_code
+ `appOL` toOL
+ [ BI cond False bid1
+ , NOP
- , OR False g0 (RIImm (ImmInt 0)) dst
- , BI ALWAYS False bid2
- , NOP
+ , OR False g0 (RIImm (ImmInt 0)) dst
+ , BI ALWAYS False bid2
+ , NOP
- , NEWBLOCK bid1
- , OR False g0 (RIImm (ImmInt 1)) dst
- , BI ALWAYS False bid2
- , NOP
+ , NEWBLOCK bid1
+ , OR False g0 (RIImm (ImmInt 1)) dst
+ , BI ALWAYS False bid2
+ , NOP
- , NEWBLOCK bid2]
+ , NEWBLOCK bid2]
return (Any II32 code__2)
@@ -679,26 +671,22 @@ condFltReg cond x y = do
CondCode _ cond cond_code <- condFltCode cond x y
let
- code__2 dst
- = cond_code
- `appOL` toOL
- [ NOP
- , BF cond False bid1
- , NOP
+ code__2 dst
+ = cond_code
+ `appOL` toOL
+ [ NOP
+ , BF cond False bid1
+ , NOP
- , OR False g0 (RIImm (ImmInt 0)) dst
- , BI ALWAYS False bid2
- , NOP
+ , OR False g0 (RIImm (ImmInt 0)) dst
+ , BI ALWAYS False bid2
+ , NOP
- , NEWBLOCK bid1
- , OR False g0 (RIImm (ImmInt 1)) dst
- , BI ALWAYS False bid2
- , NOP
+ , NEWBLOCK bid1
+ , OR False g0 (RIImm (ImmInt 1)) dst
+ , BI ALWAYS False bid2
+ , NOP
- , NEWBLOCK bid2 ]
+ , NEWBLOCK bid2 ]
return (Any II32 code__2)
-
-
-
-
diff --git a/compiler/nativeGen/SPARC/CodeGen/Sanity.hs b/compiler/nativeGen/SPARC/CodeGen/Sanity.hs
index d4cdaf2b1672..81641326f2aa 100644
--- a/compiler/nativeGen/SPARC/CodeGen/Sanity.hs
+++ b/compiler/nativeGen/SPARC/CodeGen/Sanity.hs
@@ -1,22 +1,13 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
--- | One ounce of sanity checking is worth 10000000000000000 ounces
--- of staring blindly at assembly code trying to find the problem..
---
+-- | One ounce of sanity checking is worth 10000000000000000 ounces
+-- of staring blindly at assembly code trying to find the problem..
module SPARC.CodeGen.Sanity (
- checkBlock
+ checkBlock
)
where
import SPARC.Instr
-import SPARC.Ppr ()
+import SPARC.Ppr ()
import Instruction
import Cmm
@@ -31,48 +22,46 @@ checkBlock :: CmmBlock
-> NatBasicBlock Instr
checkBlock cmm block@(BasicBlock _ instrs)
- | checkBlockInstrs instrs
- = block
-
- | otherwise
- = pprPanic
- ("SPARC.CodeGen: bad block\n")
- ( vcat [ text " -- cmm -----------------\n"
- , ppr cmm
- , text " -- native code ---------\n"
- , ppr block ])
+ | checkBlockInstrs instrs
+ = block
+
+ | otherwise
+ = pprPanic
+ ("SPARC.CodeGen: bad block\n")
+ ( vcat [ text " -- cmm -----------------\n"
+ , ppr cmm
+ , text " -- native code ---------\n"
+ , ppr block ])
checkBlockInstrs :: [Instr] -> Bool
checkBlockInstrs ii
- -- An unconditional jumps end the block.
- -- There must be an unconditional jump in the block, otherwise
- -- the register liveness determinator will get the liveness
- -- information wrong.
- --
- -- If the block ends with a cmm call that never returns
- -- then there can be unreachable instructions after the jump,
- -- but we don't mind here.
- --
- | instr : NOP : _ <- ii
- , isUnconditionalJump instr
- = True
-
- -- All jumps must have a NOP in their branch delay slot.
- -- The liveness determinator and register allocators aren't smart
- -- enough to handle branch delay slots.
- --
- | instr : NOP : is <- ii
- , isJumpishInstr instr
- = checkBlockInstrs is
-
- -- keep checking
- | _:i2:is <- ii
- = checkBlockInstrs (i2:is)
-
- -- this block is no good
- | otherwise
- = False
-
-
+ -- An unconditional jumps end the block.
+ -- There must be an unconditional jump in the block, otherwise
+ -- the register liveness determinator will get the liveness
+ -- information wrong.
+ --
+ -- If the block ends with a cmm call that never returns
+ -- then there can be unreachable instructions after the jump,
+ -- but we don't mind here.
+ --
+ | instr : NOP : _ <- ii
+ , isUnconditionalJump instr
+ = True
+
+ -- All jumps must have a NOP in their branch delay slot.
+ -- The liveness determinator and register allocators aren't smart
+ -- enough to handle branch delay slots.
+ --
+ | instr : NOP : is <- ii
+ , isJumpishInstr instr
+ = checkBlockInstrs is
+
+ -- keep checking
+ | _:i2:is <- ii
+ = checkBlockInstrs (i2:is)
+
+ -- this block is no good
+ | otherwise
+ = False
diff --git a/compiler/nativeGen/SPARC/Cond.hs b/compiler/nativeGen/SPARC/Cond.hs
index b8919a72a2a7..da41457950e5 100644
--- a/compiler/nativeGen/SPARC/Cond.hs
+++ b/compiler/nativeGen/SPARC/Cond.hs
@@ -1,39 +1,31 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module SPARC.Cond (
- Cond(..),
- condUnsigned,
- condToSigned,
- condToUnsigned
+ Cond(..),
+ condUnsigned,
+ condToSigned,
+ condToUnsigned
)
where
-- | Branch condition codes.
data Cond
- = ALWAYS
- | EQQ
- | GE
- | GEU
- | GTT
- | GU
- | LE
- | LEU
- | LTT
- | LU
- | NE
- | NEG
- | NEVER
- | POS
- | VC
- | VS
- deriving Eq
+ = ALWAYS
+ | EQQ
+ | GE
+ | GEU
+ | GTT
+ | GU
+ | LE
+ | LEU
+ | LTT
+ | LU
+ | NE
+ | NEG
+ | NEVER
+ | POS
+ | VC
+ | VS
+ deriving Eq
condUnsigned :: Cond -> Bool
diff --git a/compiler/nativeGen/SPARC/Imm.hs b/compiler/nativeGen/SPARC/Imm.hs
index 4c2bb5a4814e..cb53ba411c6e 100644
--- a/compiler/nativeGen/SPARC/Imm.hs
+++ b/compiler/nativeGen/SPARC/Imm.hs
@@ -1,16 +1,8 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module SPARC.Imm (
- -- immediate values
- Imm(..),
- strImmLit,
- litToImm
+ -- immediate values
+ Imm(..),
+ strImmLit,
+ litToImm
)
where
@@ -21,29 +13,29 @@ import CLabel
import Outputable
-- | An immediate value.
--- Not all of these are directly representable by the machine.
--- Things like ImmLit are slurped out and put in a data segment instead.
+-- Not all of these are directly representable by the machine.
+-- Things like ImmLit are slurped out and put in a data segment instead.
--
data Imm
- = ImmInt Int
+ = ImmInt Int
- -- Sigh.
- | ImmInteger Integer
+ -- Sigh.
+ | ImmInteger Integer
- -- AbstractC Label (with baggage)
- | ImmCLbl CLabel
+ -- AbstractC Label (with baggage)
+ | ImmCLbl CLabel
- -- Simple string
- | ImmLit SDoc
- | ImmIndex CLabel Int
- | ImmFloat Rational
- | ImmDouble Rational
+ -- Simple string
+ | ImmLit SDoc
+ | ImmIndex CLabel Int
+ | ImmFloat Rational
+ | ImmDouble Rational
- | ImmConstantSum Imm Imm
- | ImmConstantDiff Imm Imm
+ | ImmConstantSum Imm Imm
+ | ImmConstantDiff Imm Imm
- | LO Imm
- | HI Imm
+ | LO Imm
+ | HI Imm
-- | Create a ImmLit containing this string.
@@ -52,24 +44,22 @@ strImmLit s = ImmLit (text s)
-- | Convert a CmmLit to an Imm.
--- Narrow to the width: a CmmInt might be out of
--- range, but we assume that ImmInteger only contains
--- in-range values. A signed value should be fine here.
+-- Narrow to the width: a CmmInt might be out of
+-- range, but we assume that ImmInteger only contains
+-- in-range values. A signed value should be fine here.
--
litToImm :: CmmLit -> Imm
litToImm lit
= case lit of
- CmmInt i w -> ImmInteger (narrowS w i)
- CmmFloat f W32 -> ImmFloat f
- CmmFloat f W64 -> ImmDouble f
- CmmLabel l -> ImmCLbl l
- CmmLabelOff l off -> ImmIndex l off
+ CmmInt i w -> ImmInteger (narrowS w i)
+ CmmFloat f W32 -> ImmFloat f
+ CmmFloat f W64 -> ImmDouble f
+ CmmLabel l -> ImmCLbl l
+ CmmLabelOff l off -> ImmIndex l off
- CmmLabelDiffOff l1 l2 off
- -> ImmConstantSum
- (ImmConstantDiff (ImmCLbl l1) (ImmCLbl l2))
- (ImmInt off)
+ CmmLabelDiffOff l1 l2 off
+ -> ImmConstantSum
+ (ImmConstantDiff (ImmCLbl l1) (ImmCLbl l2))
+ (ImmInt off)
_ -> panic "SPARC.Regs.litToImm: no match"
-
-
diff --git a/compiler/nativeGen/SPARC/Instr.hs b/compiler/nativeGen/SPARC/Instr.hs
index 601e04787a54..fb8cc0cadc2c 100644
--- a/compiler/nativeGen/SPARC/Instr.hs
+++ b/compiler/nativeGen/SPARC/Instr.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Machine-dependent assembly language
@@ -5,28 +7,20 @@
-- (c) The University of Glasgow 1993-2004
--
-----------------------------------------------------------------------------
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
#include "HsVersions.h"
#include "nativeGen/NCG.h"
module SPARC.Instr (
- RI(..),
- riZero,
-
- fpRelEA,
- moveSp,
-
- isUnconditionalJump,
-
- Instr(..),
- maxSpillSlots
+ RI(..),
+ riZero,
+
+ fpRelEA,
+ moveSp,
+
+ isUnconditionalJump,
+
+ Instr(..),
+ maxSpillSlots
)
where
@@ -55,23 +49,23 @@ import Platform
-- | Register or immediate
-data RI
- = RIReg Reg
- | RIImm Imm
+data RI
+ = RIReg Reg
+ | RIImm Imm
-- | Check if a RI represents a zero value.
--- - a literal zero
--- - register %g0, which is always zero.
+-- - a literal zero
+-- - register %g0, which is always zero.
--
-riZero :: RI -> Bool
-riZero (RIImm (ImmInt 0)) = True
-riZero (RIImm (ImmInteger 0)) = True
-riZero (RIReg (RegReal (RealRegSingle 0))) = True
-riZero _ = False
+riZero :: RI -> Bool
+riZero (RIImm (ImmInt 0)) = True
+riZero (RIImm (ImmInteger 0)) = True
+riZero (RIReg (RegReal (RealRegSingle 0))) = True
+riZero _ = False
-- | Calculate the effective address which would be used by the
--- corresponding fpRel sequence.
+-- corresponding fpRel sequence.
fpRelEA :: Int -> Reg -> Instr
fpRelEA n dst
= ADD False False fp (RIImm (ImmInt (n * wordLength))) dst
@@ -86,294 +80,294 @@ moveSp n
isUnconditionalJump :: Instr -> Bool
isUnconditionalJump ii
= case ii of
- CALL{} -> True
- JMP{} -> True
- JMP_TBL{} -> True
- BI ALWAYS _ _ -> True
- BF ALWAYS _ _ -> True
- _ -> False
+ CALL{} -> True
+ JMP{} -> True
+ JMP_TBL{} -> True
+ BI ALWAYS _ _ -> True
+ BF ALWAYS _ _ -> True
+ _ -> False
-- | instance for sparc instruction set
instance Instruction Instr where
- regUsageOfInstr = sparc_regUsageOfInstr
- patchRegsOfInstr = sparc_patchRegsOfInstr
- isJumpishInstr = sparc_isJumpishInstr
- jumpDestsOfInstr = sparc_jumpDestsOfInstr
- patchJumpInstr = sparc_patchJumpInstr
- mkSpillInstr = sparc_mkSpillInstr
- mkLoadInstr = sparc_mkLoadInstr
- takeDeltaInstr = sparc_takeDeltaInstr
- isMetaInstr = sparc_isMetaInstr
- mkRegRegMoveInstr = sparc_mkRegRegMoveInstr
- takeRegRegMoveInstr = sparc_takeRegRegMoveInstr
- mkJumpInstr = sparc_mkJumpInstr
+ regUsageOfInstr = sparc_regUsageOfInstr
+ patchRegsOfInstr = sparc_patchRegsOfInstr
+ isJumpishInstr = sparc_isJumpishInstr
+ jumpDestsOfInstr = sparc_jumpDestsOfInstr
+ patchJumpInstr = sparc_patchJumpInstr
+ mkSpillInstr = sparc_mkSpillInstr
+ mkLoadInstr = sparc_mkLoadInstr
+ takeDeltaInstr = sparc_takeDeltaInstr
+ isMetaInstr = sparc_isMetaInstr
+ mkRegRegMoveInstr = sparc_mkRegRegMoveInstr
+ takeRegRegMoveInstr = sparc_takeRegRegMoveInstr
+ mkJumpInstr = sparc_mkJumpInstr
mkStackAllocInstr = panic "no sparc_mkStackAllocInstr"
mkStackDeallocInstr = panic "no sparc_mkStackDeallocInstr"
-- | SPARC instruction set.
--- Not complete. This is only the ones we need.
+-- Not complete. This is only the ones we need.
--
data Instr
- -- meta ops --------------------------------------------------
- -- comment pseudo-op
- = COMMENT FastString
-
- -- some static data spat out during code generation.
- -- Will be extracted before pretty-printing.
- | LDATA Section CmmStatics
-
- -- Start a new basic block. Useful during codegen, removed later.
- -- Preceding instruction should be a jump, as per the invariants
- -- for a BasicBlock (see Cmm).
- | NEWBLOCK BlockId
-
- -- specify current stack offset for benefit of subsequent passes.
- | DELTA Int
-
- -- real instrs -----------------------------------------------
- -- Loads and stores.
- | LD Size AddrMode Reg -- size, src, dst
- | ST Size Reg AddrMode -- size, src, dst
-
- -- Int Arithmetic.
- -- x: add/sub with carry bit.
- -- In SPARC V9 addx and friends were renamed addc.
- --
- -- cc: modify condition codes
- --
- | ADD Bool Bool Reg RI Reg -- x?, cc?, src1, src2, dst
- | SUB Bool Bool Reg RI Reg -- x?, cc?, src1, src2, dst
-
- | UMUL Bool Reg RI Reg -- cc?, src1, src2, dst
- | SMUL Bool Reg RI Reg -- cc?, src1, src2, dst
-
-
- -- The SPARC divide instructions perform 64bit by 32bit division
- -- The Y register is xored into the first operand.
-
- -- On _some implementations_ the Y register is overwritten by
- -- the remainder, so we have to make sure it is 0 each time.
-
- -- dst <- ((Y `shiftL` 32) `or` src1) `div` src2
- | UDIV Bool Reg RI Reg -- cc?, src1, src2, dst
- | SDIV Bool Reg RI Reg -- cc?, src1, src2, dst
-
- | RDY Reg -- move contents of Y register to reg
- | WRY Reg Reg -- Y <- src1 `xor` src2
-
- -- Logic operations.
- | AND Bool Reg RI Reg -- cc?, src1, src2, dst
- | ANDN Bool Reg RI Reg -- cc?, src1, src2, dst
- | OR Bool Reg RI Reg -- cc?, src1, src2, dst
- | ORN Bool Reg RI Reg -- cc?, src1, src2, dst
- | XOR Bool Reg RI Reg -- cc?, src1, src2, dst
- | XNOR Bool Reg RI Reg -- cc?, src1, src2, dst
- | SLL Reg RI Reg -- src1, src2, dst
- | SRL Reg RI Reg -- src1, src2, dst
- | SRA Reg RI Reg -- src1, src2, dst
-
- -- Load immediates.
- | SETHI Imm Reg -- src, dst
-
- -- Do nothing.
- -- Implemented by the assembler as SETHI 0, %g0, but worth an alias
- | NOP
-
- -- Float Arithmetic.
- -- Note that we cheat by treating F{ABS,MOV,NEG} of doubles as single
- -- instructions right up until we spit them out.
- --
- | FABS Size Reg Reg -- src dst
- | FADD Size Reg Reg Reg -- src1, src2, dst
- | FCMP Bool Size Reg Reg -- exception?, src1, src2, dst
- | FDIV Size Reg Reg Reg -- src1, src2, dst
- | FMOV Size Reg Reg -- src, dst
- | FMUL Size Reg Reg Reg -- src1, src2, dst
- | FNEG Size Reg Reg -- src, dst
- | FSQRT Size Reg Reg -- src, dst
- | FSUB Size Reg Reg Reg -- src1, src2, dst
- | FxTOy Size Size Reg Reg -- src, dst
-
- -- Jumping around.
- | BI Cond Bool BlockId -- cond, annul?, target
- | BF Cond Bool BlockId -- cond, annul?, target
-
- | JMP AddrMode -- target
-
- -- With a tabled jump we know all the possible destinations.
- -- We also need this info so we can work out what regs are live across the jump.
- --
- | JMP_TBL AddrMode [Maybe BlockId] CLabel
-
- | CALL (Either Imm Reg) Int Bool -- target, args, terminal
+ -- meta ops --------------------------------------------------
+ -- comment pseudo-op
+ = COMMENT FastString
+
+ -- some static data spat out during code generation.
+ -- Will be extracted before pretty-printing.
+ | LDATA Section CmmStatics
+
+ -- Start a new basic block. Useful during codegen, removed later.
+ -- Preceding instruction should be a jump, as per the invariants
+ -- for a BasicBlock (see Cmm).
+ | NEWBLOCK BlockId
+
+ -- specify current stack offset for benefit of subsequent passes.
+ | DELTA Int
+
+ -- real instrs -----------------------------------------------
+ -- Loads and stores.
+ | LD Size AddrMode Reg -- size, src, dst
+ | ST Size Reg AddrMode -- size, src, dst
+
+ -- Int Arithmetic.
+ -- x: add/sub with carry bit.
+ -- In SPARC V9 addx and friends were renamed addc.
+ --
+ -- cc: modify condition codes
+ --
+ | ADD Bool Bool Reg RI Reg -- x?, cc?, src1, src2, dst
+ | SUB Bool Bool Reg RI Reg -- x?, cc?, src1, src2, dst
+
+ | UMUL Bool Reg RI Reg -- cc?, src1, src2, dst
+ | SMUL Bool Reg RI Reg -- cc?, src1, src2, dst
+
+
+ -- The SPARC divide instructions perform 64bit by 32bit division
+ -- The Y register is xored into the first operand.
+
+ -- On _some implementations_ the Y register is overwritten by
+ -- the remainder, so we have to make sure it is 0 each time.
+
+ -- dst <- ((Y `shiftL` 32) `or` src1) `div` src2
+ | UDIV Bool Reg RI Reg -- cc?, src1, src2, dst
+ | SDIV Bool Reg RI Reg -- cc?, src1, src2, dst
+
+ | RDY Reg -- move contents of Y register to reg
+ | WRY Reg Reg -- Y <- src1 `xor` src2
+
+ -- Logic operations.
+ | AND Bool Reg RI Reg -- cc?, src1, src2, dst
+ | ANDN Bool Reg RI Reg -- cc?, src1, src2, dst
+ | OR Bool Reg RI Reg -- cc?, src1, src2, dst
+ | ORN Bool Reg RI Reg -- cc?, src1, src2, dst
+ | XOR Bool Reg RI Reg -- cc?, src1, src2, dst
+ | XNOR Bool Reg RI Reg -- cc?, src1, src2, dst
+ | SLL Reg RI Reg -- src1, src2, dst
+ | SRL Reg RI Reg -- src1, src2, dst
+ | SRA Reg RI Reg -- src1, src2, dst
+
+ -- Load immediates.
+ | SETHI Imm Reg -- src, dst
+
+ -- Do nothing.
+ -- Implemented by the assembler as SETHI 0, %g0, but worth an alias
+ | NOP
+
+ -- Float Arithmetic.
+ -- Note that we cheat by treating F{ABS,MOV,NEG} of doubles as single
+ -- instructions right up until we spit them out.
+ --
+ | FABS Size Reg Reg -- src dst
+ | FADD Size Reg Reg Reg -- src1, src2, dst
+ | FCMP Bool Size Reg Reg -- exception?, src1, src2, dst
+ | FDIV Size Reg Reg Reg -- src1, src2, dst
+ | FMOV Size Reg Reg -- src, dst
+ | FMUL Size Reg Reg Reg -- src1, src2, dst
+ | FNEG Size Reg Reg -- src, dst
+ | FSQRT Size Reg Reg -- src, dst
+ | FSUB Size Reg Reg Reg -- src1, src2, dst
+ | FxTOy Size Size Reg Reg -- src, dst
+
+ -- Jumping around.
+ | BI Cond Bool BlockId -- cond, annul?, target
+ | BF Cond Bool BlockId -- cond, annul?, target
+
+ | JMP AddrMode -- target
+
+ -- With a tabled jump we know all the possible destinations.
+ -- We also need this info so we can work out what regs are live across the jump.
+ --
+ | JMP_TBL AddrMode [Maybe BlockId] CLabel
+
+ | CALL (Either Imm Reg) Int Bool -- target, args, terminal
-- | regUsage returns the sets of src and destination registers used
--- by a particular instruction. Machine registers that are
--- pre-allocated to stgRegs are filtered out, because they are
--- uninteresting from a register allocation standpoint. (We wouldn't
--- want them to end up on the free list!) As far as we are concerned,
--- the fixed registers simply don't exist (for allocation purposes,
--- anyway).
-
--- regUsage doesn't need to do any trickery for jumps and such. Just
--- state precisely the regs read and written by that insn. The
--- consequences of control flow transfers, as far as register
--- allocation goes, are taken care of by the register allocator.
+-- by a particular instruction. Machine registers that are
+-- pre-allocated to stgRegs are filtered out, because they are
+-- uninteresting from a register allocation standpoint. (We wouldn't
+-- want them to end up on the free list!) As far as we are concerned,
+-- the fixed registers simply don't exist (for allocation purposes,
+-- anyway).
+
+-- regUsage doesn't need to do any trickery for jumps and such. Just
+-- state precisely the regs read and written by that insn. The
+-- consequences of control flow transfers, as far as register
+-- allocation goes, are taken care of by the register allocator.
--
sparc_regUsageOfInstr :: Platform -> Instr -> RegUsage
sparc_regUsageOfInstr platform instr
= case instr of
- LD _ addr reg -> usage (regAddr addr, [reg])
- ST _ reg addr -> usage (reg : regAddr addr, [])
- ADD _ _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- SUB _ _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- UMUL _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- SMUL _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- UDIV _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- SDIV _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- RDY rd -> usage ([], [rd])
- WRY r1 r2 -> usage ([r1, r2], [])
- AND _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- ANDN _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- OR _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- ORN _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- XOR _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- XNOR _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
- SLL r1 ar r2 -> usage (r1 : regRI ar, [r2])
- SRL r1 ar r2 -> usage (r1 : regRI ar, [r2])
- SRA r1 ar r2 -> usage (r1 : regRI ar, [r2])
- SETHI _ reg -> usage ([], [reg])
- FABS _ r1 r2 -> usage ([r1], [r2])
- FADD _ r1 r2 r3 -> usage ([r1, r2], [r3])
- FCMP _ _ r1 r2 -> usage ([r1, r2], [])
- FDIV _ r1 r2 r3 -> usage ([r1, r2], [r3])
- FMOV _ r1 r2 -> usage ([r1], [r2])
- FMUL _ r1 r2 r3 -> usage ([r1, r2], [r3])
- FNEG _ r1 r2 -> usage ([r1], [r2])
- FSQRT _ r1 r2 -> usage ([r1], [r2])
- FSUB _ r1 r2 r3 -> usage ([r1, r2], [r3])
- FxTOy _ _ r1 r2 -> usage ([r1], [r2])
-
- JMP addr -> usage (regAddr addr, [])
- JMP_TBL addr _ _ -> usage (regAddr addr, [])
-
- CALL (Left _ ) _ True -> noUsage
- CALL (Left _ ) n False -> usage (argRegs n, callClobberedRegs)
- CALL (Right reg) _ True -> usage ([reg], [])
- CALL (Right reg) n False -> usage (reg : (argRegs n), callClobberedRegs)
- _ -> noUsage
+ LD _ addr reg -> usage (regAddr addr, [reg])
+ ST _ reg addr -> usage (reg : regAddr addr, [])
+ ADD _ _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ SUB _ _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ UMUL _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ SMUL _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ UDIV _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ SDIV _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ RDY rd -> usage ([], [rd])
+ WRY r1 r2 -> usage ([r1, r2], [])
+ AND _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ ANDN _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ OR _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ ORN _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ XOR _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ XNOR _ r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ SLL r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ SRL r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ SRA r1 ar r2 -> usage (r1 : regRI ar, [r2])
+ SETHI _ reg -> usage ([], [reg])
+ FABS _ r1 r2 -> usage ([r1], [r2])
+ FADD _ r1 r2 r3 -> usage ([r1, r2], [r3])
+ FCMP _ _ r1 r2 -> usage ([r1, r2], [])
+ FDIV _ r1 r2 r3 -> usage ([r1, r2], [r3])
+ FMOV _ r1 r2 -> usage ([r1], [r2])
+ FMUL _ r1 r2 r3 -> usage ([r1, r2], [r3])
+ FNEG _ r1 r2 -> usage ([r1], [r2])
+ FSQRT _ r1 r2 -> usage ([r1], [r2])
+ FSUB _ r1 r2 r3 -> usage ([r1, r2], [r3])
+ FxTOy _ _ r1 r2 -> usage ([r1], [r2])
+
+ JMP addr -> usage (regAddr addr, [])
+ JMP_TBL addr _ _ -> usage (regAddr addr, [])
+
+ CALL (Left _ ) _ True -> noUsage
+ CALL (Left _ ) n False -> usage (argRegs n, callClobberedRegs)
+ CALL (Right reg) _ True -> usage ([reg], [])
+ CALL (Right reg) n False -> usage (reg : (argRegs n), callClobberedRegs)
+ _ -> noUsage
where
- usage (src, dst)
+ usage (src, dst)
= RU (filter (interesting platform) src)
(filter (interesting platform) dst)
- regAddr (AddrRegReg r1 r2) = [r1, r2]
- regAddr (AddrRegImm r1 _) = [r1]
+ regAddr (AddrRegReg r1 r2) = [r1, r2]
+ regAddr (AddrRegImm r1 _) = [r1]
- regRI (RIReg r) = [r]
- regRI _ = []
+ regRI (RIReg r) = [r]
+ regRI _ = []
--- | Interesting regs are virtuals, or ones that are allocatable
--- by the register allocator.
+-- | Interesting regs are virtuals, or ones that are allocatable
+-- by the register allocator.
interesting :: Platform -> Reg -> Bool
interesting platform reg
= case reg of
- RegVirtual _ -> True
- RegReal (RealRegSingle r1) -> isFastTrue (freeReg platform r1)
- RegReal (RealRegPair r1 _) -> isFastTrue (freeReg platform r1)
+ RegVirtual _ -> True
+ RegReal (RealRegSingle r1) -> isFastTrue (freeReg platform r1)
+ RegReal (RealRegPair r1 _) -> isFastTrue (freeReg platform r1)
-- | Apply a given mapping to tall the register references in this instruction.
sparc_patchRegsOfInstr :: Instr -> (Reg -> Reg) -> Instr
sparc_patchRegsOfInstr instr env = case instr of
- LD sz addr reg -> LD sz (fixAddr addr) (env reg)
- ST sz reg addr -> ST sz (env reg) (fixAddr addr)
-
- ADD x cc r1 ar r2 -> ADD x cc (env r1) (fixRI ar) (env r2)
- SUB x cc r1 ar r2 -> SUB x cc (env r1) (fixRI ar) (env r2)
- UMUL cc r1 ar r2 -> UMUL cc (env r1) (fixRI ar) (env r2)
- SMUL cc r1 ar r2 -> SMUL cc (env r1) (fixRI ar) (env r2)
- UDIV cc r1 ar r2 -> UDIV cc (env r1) (fixRI ar) (env r2)
- SDIV cc r1 ar r2 -> SDIV cc (env r1) (fixRI ar) (env r2)
- RDY rd -> RDY (env rd)
- WRY r1 r2 -> WRY (env r1) (env r2)
- AND b r1 ar r2 -> AND b (env r1) (fixRI ar) (env r2)
- ANDN b r1 ar r2 -> ANDN b (env r1) (fixRI ar) (env r2)
- OR b r1 ar r2 -> OR b (env r1) (fixRI ar) (env r2)
- ORN b r1 ar r2 -> ORN b (env r1) (fixRI ar) (env r2)
- XOR b r1 ar r2 -> XOR b (env r1) (fixRI ar) (env r2)
- XNOR b r1 ar r2 -> XNOR b (env r1) (fixRI ar) (env r2)
- SLL r1 ar r2 -> SLL (env r1) (fixRI ar) (env r2)
- SRL r1 ar r2 -> SRL (env r1) (fixRI ar) (env r2)
- SRA r1 ar r2 -> SRA (env r1) (fixRI ar) (env r2)
-
- SETHI imm reg -> SETHI imm (env reg)
-
- FABS s r1 r2 -> FABS s (env r1) (env r2)
- FADD s r1 r2 r3 -> FADD s (env r1) (env r2) (env r3)
- FCMP e s r1 r2 -> FCMP e s (env r1) (env r2)
- FDIV s r1 r2 r3 -> FDIV s (env r1) (env r2) (env r3)
- FMOV s r1 r2 -> FMOV s (env r1) (env r2)
- FMUL s r1 r2 r3 -> FMUL s (env r1) (env r2) (env r3)
- FNEG s r1 r2 -> FNEG s (env r1) (env r2)
- FSQRT s r1 r2 -> FSQRT s (env r1) (env r2)
- FSUB s r1 r2 r3 -> FSUB s (env r1) (env r2) (env r3)
- FxTOy s1 s2 r1 r2 -> FxTOy s1 s2 (env r1) (env r2)
-
- JMP addr -> JMP (fixAddr addr)
- JMP_TBL addr ids l -> JMP_TBL (fixAddr addr) ids l
-
- CALL (Left i) n t -> CALL (Left i) n t
- CALL (Right r) n t -> CALL (Right (env r)) n t
- _ -> instr
+ LD sz addr reg -> LD sz (fixAddr addr) (env reg)
+ ST sz reg addr -> ST sz (env reg) (fixAddr addr)
+
+ ADD x cc r1 ar r2 -> ADD x cc (env r1) (fixRI ar) (env r2)
+ SUB x cc r1 ar r2 -> SUB x cc (env r1) (fixRI ar) (env r2)
+ UMUL cc r1 ar r2 -> UMUL cc (env r1) (fixRI ar) (env r2)
+ SMUL cc r1 ar r2 -> SMUL cc (env r1) (fixRI ar) (env r2)
+ UDIV cc r1 ar r2 -> UDIV cc (env r1) (fixRI ar) (env r2)
+ SDIV cc r1 ar r2 -> SDIV cc (env r1) (fixRI ar) (env r2)
+ RDY rd -> RDY (env rd)
+ WRY r1 r2 -> WRY (env r1) (env r2)
+ AND b r1 ar r2 -> AND b (env r1) (fixRI ar) (env r2)
+ ANDN b r1 ar r2 -> ANDN b (env r1) (fixRI ar) (env r2)
+ OR b r1 ar r2 -> OR b (env r1) (fixRI ar) (env r2)
+ ORN b r1 ar r2 -> ORN b (env r1) (fixRI ar) (env r2)
+ XOR b r1 ar r2 -> XOR b (env r1) (fixRI ar) (env r2)
+ XNOR b r1 ar r2 -> XNOR b (env r1) (fixRI ar) (env r2)
+ SLL r1 ar r2 -> SLL (env r1) (fixRI ar) (env r2)
+ SRL r1 ar r2 -> SRL (env r1) (fixRI ar) (env r2)
+ SRA r1 ar r2 -> SRA (env r1) (fixRI ar) (env r2)
+
+ SETHI imm reg -> SETHI imm (env reg)
+
+ FABS s r1 r2 -> FABS s (env r1) (env r2)
+ FADD s r1 r2 r3 -> FADD s (env r1) (env r2) (env r3)
+ FCMP e s r1 r2 -> FCMP e s (env r1) (env r2)
+ FDIV s r1 r2 r3 -> FDIV s (env r1) (env r2) (env r3)
+ FMOV s r1 r2 -> FMOV s (env r1) (env r2)
+ FMUL s r1 r2 r3 -> FMUL s (env r1) (env r2) (env r3)
+ FNEG s r1 r2 -> FNEG s (env r1) (env r2)
+ FSQRT s r1 r2 -> FSQRT s (env r1) (env r2)
+ FSUB s r1 r2 r3 -> FSUB s (env r1) (env r2) (env r3)
+ FxTOy s1 s2 r1 r2 -> FxTOy s1 s2 (env r1) (env r2)
+
+ JMP addr -> JMP (fixAddr addr)
+ JMP_TBL addr ids l -> JMP_TBL (fixAddr addr) ids l
+
+ CALL (Left i) n t -> CALL (Left i) n t
+ CALL (Right r) n t -> CALL (Right (env r)) n t
+ _ -> instr
where
- fixAddr (AddrRegReg r1 r2) = AddrRegReg (env r1) (env r2)
- fixAddr (AddrRegImm r1 i) = AddrRegImm (env r1) i
+ fixAddr (AddrRegReg r1 r2) = AddrRegReg (env r1) (env r2)
+ fixAddr (AddrRegImm r1 i) = AddrRegImm (env r1) i
- fixRI (RIReg r) = RIReg (env r)
- fixRI other = other
+ fixRI (RIReg r) = RIReg (env r)
+ fixRI other = other
--------------------------------------------------------------------------------
sparc_isJumpishInstr :: Instr -> Bool
sparc_isJumpishInstr instr
= case instr of
- BI{} -> True
- BF{} -> True
- JMP{} -> True
- JMP_TBL{} -> True
- CALL{} -> True
- _ -> False
+ BI{} -> True
+ BF{} -> True
+ JMP{} -> True
+ JMP_TBL{} -> True
+ CALL{} -> True
+ _ -> False
sparc_jumpDestsOfInstr :: Instr -> [BlockId]
sparc_jumpDestsOfInstr insn
= case insn of
- BI _ _ id -> [id]
- BF _ _ id -> [id]
- JMP_TBL _ ids _ -> [id | Just id <- ids]
- _ -> []
+ BI _ _ id -> [id]
+ BF _ _ id -> [id]
+ JMP_TBL _ ids _ -> [id | Just id <- ids]
+ _ -> []
sparc_patchJumpInstr :: Instr -> (BlockId -> BlockId) -> Instr
sparc_patchJumpInstr insn patchF
= case insn of
- BI cc annul id -> BI cc annul (patchF id)
- BF cc annul id -> BF cc annul (patchF id)
- JMP_TBL n ids l -> JMP_TBL n (map (fmap patchF) ids) l
- _ -> insn
+ BI cc annul id -> BI cc annul (patchF id)
+ BF cc annul id -> BF cc annul (patchF id)
+ JMP_TBL n ids l -> JMP_TBL n (map (fmap patchF) ids) l
+ _ -> insn
--------------------------------------------------------------------------------
-- | Make a spill instruction.
--- On SPARC we spill below frame pointer leaving 2 words/spill
+-- On SPARC we spill below frame pointer leaving 2 words/spill
sparc_mkSpillInstr
:: DynFlags
-> Reg -- ^ register to spill
@@ -385,12 +379,12 @@ sparc_mkSpillInstr dflags reg _ slot
= let platform = targetPlatform dflags
off = spillSlotToOffset dflags slot
off_w = 1 + (off `div` 4)
- sz = case targetClassOfReg platform reg of
- RcInteger -> II32
- RcFloat -> FF32
- RcDouble -> FF64
- _ -> panic "sparc_mkSpillInstr"
-
+ sz = case targetClassOfReg platform reg of
+ RcInteger -> II32
+ RcFloat -> FF32
+ RcDouble -> FF64
+ _ -> panic "sparc_mkSpillInstr"
+
in ST sz reg (fpRel (negate off_w))
@@ -405,12 +399,12 @@ sparc_mkLoadInstr
sparc_mkLoadInstr dflags reg _ slot
= let platform = targetPlatform dflags
off = spillSlotToOffset dflags slot
- off_w = 1 + (off `div` 4)
- sz = case targetClassOfReg platform reg of
- RcInteger -> II32
- RcFloat -> FF32
- RcDouble -> FF64
- _ -> panic "sparc_mkLoadInstr"
+ off_w = 1 + (off `div` 4)
+ sz = case targetClassOfReg platform reg of
+ RcInteger -> II32
+ RcFloat -> FF32
+ RcDouble -> FF64
+ _ -> panic "sparc_mkLoadInstr"
in LD sz (fpRel (- off_w)) reg
@@ -418,32 +412,32 @@ sparc_mkLoadInstr dflags reg _ slot
--------------------------------------------------------------------------------
-- | See if this instruction is telling us the current C stack delta
sparc_takeDeltaInstr
- :: Instr
- -> Maybe Int
-
+ :: Instr
+ -> Maybe Int
+
sparc_takeDeltaInstr instr
= case instr of
- DELTA i -> Just i
- _ -> Nothing
+ DELTA i -> Just i
+ _ -> Nothing
sparc_isMetaInstr
- :: Instr
- -> Bool
-
+ :: Instr
+ -> Bool
+
sparc_isMetaInstr instr
= case instr of
- COMMENT{} -> True
- LDATA{} -> True
- NEWBLOCK{} -> True
- DELTA{} -> True
- _ -> False
-
+ COMMENT{} -> True
+ LDATA{} -> True
+ NEWBLOCK{} -> True
+ DELTA{} -> True
+ _ -> False
+
-- | Make a reg-reg move instruction.
--- On SPARC v8 there are no instructions to move directly between
--- floating point and integer regs. If we need to do that then we
--- have to go via memory.
+-- On SPARC v8 there are no instructions to move directly between
+-- floating point and integer regs. If we need to do that then we
+-- have to go via memory.
--
sparc_mkRegRegMoveInstr
:: Platform
@@ -452,40 +446,39 @@ sparc_mkRegRegMoveInstr
-> Instr
sparc_mkRegRegMoveInstr platform src dst
- | srcClass <- targetClassOfReg platform src
- , dstClass <- targetClassOfReg platform dst
- , srcClass == dstClass
- = case srcClass of
- RcInteger -> ADD False False src (RIReg g0) dst
- RcDouble -> FMOV FF64 src dst
- RcFloat -> FMOV FF32 src dst
+ | srcClass <- targetClassOfReg platform src
+ , dstClass <- targetClassOfReg platform dst
+ , srcClass == dstClass
+ = case srcClass of
+ RcInteger -> ADD False False src (RIReg g0) dst
+ RcDouble -> FMOV FF64 src dst
+ RcFloat -> FMOV FF32 src dst
_ -> panic "sparc_mkRegRegMoveInstr"
-
- | otherwise
- = panic "SPARC.Instr.mkRegRegMoveInstr: classes of src and dest not the same"
+
+ | otherwise
+ = panic "SPARC.Instr.mkRegRegMoveInstr: classes of src and dest not the same"
-- | Check whether an instruction represents a reg-reg move.
--- The register allocator attempts to eliminate reg->reg moves whenever it can,
--- by assigning the src and dest temporaries to the same real register.
+-- The register allocator attempts to eliminate reg->reg moves whenever it can,
+-- by assigning the src and dest temporaries to the same real register.
--
sparc_takeRegRegMoveInstr :: Instr -> Maybe (Reg,Reg)
sparc_takeRegRegMoveInstr instr
= case instr of
- ADD False False src (RIReg src2) dst
- | g0 == src2 -> Just (src, dst)
+ ADD False False src (RIReg src2) dst
+ | g0 == src2 -> Just (src, dst)
- FMOV FF64 src dst -> Just (src, dst)
- FMOV FF32 src dst -> Just (src, dst)
- _ -> Nothing
+ FMOV FF64 src dst -> Just (src, dst)
+ FMOV FF32 src dst -> Just (src, dst)
+ _ -> Nothing
-- | Make an unconditional branch instruction.
sparc_mkJumpInstr
- :: BlockId
- -> [Instr]
-
-sparc_mkJumpInstr id
- = [BI ALWAYS False id
- , NOP] -- fill the branch delay slot.
+ :: BlockId
+ -> [Instr]
+sparc_mkJumpInstr id
+ = [BI ALWAYS False id
+ , NOP] -- fill the branch delay slot.
diff --git a/compiler/nativeGen/SPARC/Ppr.hs b/compiler/nativeGen/SPARC/Ppr.hs
index 601b5288a0d9..654179e0770c 100644
--- a/compiler/nativeGen/SPARC/Ppr.hs
+++ b/compiler/nativeGen/SPARC/Ppr.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Pretty-printing assembly language
diff --git a/compiler/nativeGen/SPARC/Regs.hs b/compiler/nativeGen/SPARC/Regs.hs
index 55b6ac9156e2..394389c4bf8e 100644
--- a/compiler/nativeGen/SPARC/Regs.hs
+++ b/compiler/nativeGen/SPARC/Regs.hs
@@ -1,39 +1,32 @@
-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1994-2004
---
+--
-- -----------------------------------------------------------------------------
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module SPARC.Regs (
- -- registers
- showReg,
- virtualRegSqueeze,
- realRegSqueeze,
- classOfRealReg,
- allRealRegs,
-
- -- machine specific info
- gReg, iReg, lReg, oReg, fReg,
- fp, sp, g0, g1, g2, o0, o1, f0, f1, f6, f8, f22, f26, f27,
-
- -- allocatable
- allocatableRegs,
-
- -- args
- argRegs,
- allArgRegs,
- callClobberedRegs,
-
- --
- mkVirtualReg,
- regDotColor
+ -- registers
+ showReg,
+ virtualRegSqueeze,
+ realRegSqueeze,
+ classOfRealReg,
+ allRealRegs,
+
+ -- machine specific info
+ gReg, iReg, lReg, oReg, fReg,
+ fp, sp, g0, g1, g2, o0, o1, f0, f1, f6, f8, f22, f26, f27,
+
+ -- allocatable
+ allocatableRegs,
+
+ -- args
+ argRegs,
+ allArgRegs,
+ callClobberedRegs,
+
+ --
+ mkVirtualReg,
+ regDotColor
)
where
@@ -50,65 +43,65 @@ import FastTypes
import FastBool
{-
- The SPARC has 64 registers of interest; 32 integer registers and 32
- floating point registers. The mapping of STG registers to SPARC
- machine registers is defined in StgRegs.h. We are, of course,
- prepared for any eventuality.
-
- The whole fp-register pairing thing on sparcs is a huge nuisance. See
- includes/stg/MachRegs.h for a description of what's going on
- here.
+ The SPARC has 64 registers of interest; 32 integer registers and 32
+ floating point registers. The mapping of STG registers to SPARC
+ machine registers is defined in StgRegs.h. We are, of course,
+ prepared for any eventuality.
+
+ The whole fp-register pairing thing on sparcs is a huge nuisance. See
+ includes/stg/MachRegs.h for a description of what's going on
+ here.
-}
-- | Get the standard name for the register with this number.
showReg :: RegNo -> String
showReg n
- | n >= 0 && n < 8 = "%g" ++ show n
- | n >= 8 && n < 16 = "%o" ++ show (n-8)
- | n >= 16 && n < 24 = "%l" ++ show (n-16)
- | n >= 24 && n < 32 = "%i" ++ show (n-24)
- | n >= 32 && n < 64 = "%f" ++ show (n-32)
- | otherwise = panic "SPARC.Regs.showReg: unknown sparc register"
+ | n >= 0 && n < 8 = "%g" ++ show n
+ | n >= 8 && n < 16 = "%o" ++ show (n-8)
+ | n >= 16 && n < 24 = "%l" ++ show (n-16)
+ | n >= 24 && n < 32 = "%i" ++ show (n-24)
+ | n >= 32 && n < 64 = "%f" ++ show (n-32)
+ | otherwise = panic "SPARC.Regs.showReg: unknown sparc register"
-- Get the register class of a certain real reg
classOfRealReg :: RealReg -> RegClass
classOfRealReg reg
= case reg of
- RealRegSingle i
- | i < 32 -> RcInteger
- | otherwise -> RcFloat
-
- RealRegPair{} -> RcDouble
+ RealRegSingle i
+ | i < 32 -> RcInteger
+ | otherwise -> RcFloat
+
+ RealRegPair{} -> RcDouble
-- | regSqueeze_class reg
--- Calculuate the maximum number of register colors that could be
--- denied to a node of this class due to having this reg
--- as a neighbour.
+-- Calculuate the maximum number of register colors that could be
+-- denied to a node of this class due to having this reg
+-- as a neighbour.
--
{-# INLINE virtualRegSqueeze #-}
virtualRegSqueeze :: RegClass -> VirtualReg -> FastInt
virtualRegSqueeze cls vr
= case cls of
- RcInteger
- -> case vr of
- VirtualRegI{} -> _ILIT(1)
- VirtualRegHi{} -> _ILIT(1)
+ RcInteger
+ -> case vr of
+ VirtualRegI{} -> _ILIT(1)
+ VirtualRegHi{} -> _ILIT(1)
_other -> _ILIT(0)
- RcFloat
- -> case vr of
- VirtualRegF{} -> _ILIT(1)
- VirtualRegD{} -> _ILIT(2)
+ RcFloat
+ -> case vr of
+ VirtualRegF{} -> _ILIT(1)
+ VirtualRegD{} -> _ILIT(2)
_other -> _ILIT(0)
- RcDouble
- -> case vr of
- VirtualRegF{} -> _ILIT(1)
- VirtualRegD{} -> _ILIT(1)
+ RcDouble
+ -> case vr of
+ VirtualRegF{} -> _ILIT(1)
+ VirtualRegD{} -> _ILIT(1)
_other -> _ILIT(0)
_other -> _ILIT(0)
@@ -118,48 +111,48 @@ realRegSqueeze :: RegClass -> RealReg -> FastInt
realRegSqueeze cls rr
= case cls of
- RcInteger
- -> case rr of
- RealRegSingle regNo
- | regNo < 32 -> _ILIT(1)
- | otherwise -> _ILIT(0)
-
- RealRegPair{} -> _ILIT(0)
-
- RcFloat
- -> case rr of
- RealRegSingle regNo
- | regNo < 32 -> _ILIT(0)
- | otherwise -> _ILIT(1)
-
- RealRegPair{} -> _ILIT(2)
-
- RcDouble
- -> case rr of
- RealRegSingle regNo
- | regNo < 32 -> _ILIT(0)
- | otherwise -> _ILIT(1)
-
- RealRegPair{} -> _ILIT(1)
-
+ RcInteger
+ -> case rr of
+ RealRegSingle regNo
+ | regNo < 32 -> _ILIT(1)
+ | otherwise -> _ILIT(0)
+
+ RealRegPair{} -> _ILIT(0)
+
+ RcFloat
+ -> case rr of
+ RealRegSingle regNo
+ | regNo < 32 -> _ILIT(0)
+ | otherwise -> _ILIT(1)
+
+ RealRegPair{} -> _ILIT(2)
+
+ RcDouble
+ -> case rr of
+ RealRegSingle regNo
+ | regNo < 32 -> _ILIT(0)
+ | otherwise -> _ILIT(1)
+
+ RealRegPair{} -> _ILIT(1)
+
_other -> _ILIT(0)
-
--- | All the allocatable registers in the machine,
--- including register pairs.
+
+-- | All the allocatable registers in the machine,
+-- including register pairs.
allRealRegs :: [RealReg]
-allRealRegs
- = [ (RealRegSingle i) | i <- [0..63] ]
- ++ [ (RealRegPair i (i+1)) | i <- [32, 34 .. 62 ] ]
+allRealRegs
+ = [ (RealRegSingle i) | i <- [0..63] ]
+ ++ [ (RealRegPair i (i+1)) | i <- [32, 34 .. 62 ] ]
-- | Get the regno for this sort of reg
gReg, lReg, iReg, oReg, fReg :: Int -> RegNo
-gReg x = x -- global regs
-oReg x = (8 + x) -- output regs
-lReg x = (16 + x) -- local regs
-iReg x = (24 + x) -- input regs
-fReg x = (32 + x) -- float regs
+gReg x = x -- global regs
+oReg x = (8 + x) -- output regs
+lReg x = (16 + x) -- local regs
+iReg x = (24 + x) -- input regs
+fReg x = (32 + x) -- float regs
-- | Some specific regs used by the code generator.
@@ -187,88 +180,87 @@ f1 = RegReal (RealRegSingle (fReg 1))
-- | Produce the second-half-of-a-double register given the first half.
{-
fPair :: Reg -> Maybe Reg
-fPair (RealReg n)
- | n >= 32 && n `mod` 2 == 0 = Just (RealReg (n+1))
+fPair (RealReg n)
+ | n >= 32 && n `mod` 2 == 0 = Just (RealReg (n+1))
fPair (VirtualRegD u)
- = Just (VirtualRegHi u)
+ = Just (VirtualRegHi u)
fPair reg
- = trace ("MachInstrs.fPair: can't get high half of supposed double reg " ++ showPpr reg)
- Nothing
+ = trace ("MachInstrs.fPair: can't get high half of supposed double reg " ++ showPpr reg)
+ Nothing
-}
--- | All the regs that the register allocator can allocate to,
--- with the the fixed use regs removed.
---
+-- | All the regs that the register allocator can allocate to,
+-- with the the fixed use regs removed.
+--
allocatableRegs :: [RealReg]
allocatableRegs
- = let isFree rr
- = case rr of
- RealRegSingle r
- -> isFastTrue (freeReg r)
+ = let isFree rr
+ = case rr of
+ RealRegSingle r
+ -> isFastTrue (freeReg r)
- RealRegPair r1 r2
- -> isFastTrue (freeReg r1)
- && isFastTrue (freeReg r2)
+ RealRegPair r1 r2
+ -> isFastTrue (freeReg r1)
+ && isFastTrue (freeReg r2)
- in filter isFree allRealRegs
+ in filter isFree allRealRegs
--- | The registers to place arguments for function calls,
--- for some number of arguments.
+-- | The registers to place arguments for function calls,
+-- for some number of arguments.
--
argRegs :: RegNo -> [Reg]
argRegs r
= case r of
- 0 -> []
- 1 -> map (RegReal . RealRegSingle . oReg) [0]
- 2 -> map (RegReal . RealRegSingle . oReg) [0,1]
- 3 -> map (RegReal . RealRegSingle . oReg) [0,1,2]
- 4 -> map (RegReal . RealRegSingle . oReg) [0,1,2,3]
- 5 -> map (RegReal . RealRegSingle . oReg) [0,1,2,3,4]
- 6 -> map (RegReal . RealRegSingle . oReg) [0,1,2,3,4,5]
- _ -> panic "MachRegs.argRegs(sparc): don't know about >6 arguments!"
+ 0 -> []
+ 1 -> map (RegReal . RealRegSingle . oReg) [0]
+ 2 -> map (RegReal . RealRegSingle . oReg) [0,1]
+ 3 -> map (RegReal . RealRegSingle . oReg) [0,1,2]
+ 4 -> map (RegReal . RealRegSingle . oReg) [0,1,2,3]
+ 5 -> map (RegReal . RealRegSingle . oReg) [0,1,2,3,4]
+ 6 -> map (RegReal . RealRegSingle . oReg) [0,1,2,3,4,5]
+ _ -> panic "MachRegs.argRegs(sparc): don't know about >6 arguments!"
-- | All all the regs that could possibly be returned by argRegs
--
allArgRegs :: [Reg]
-allArgRegs
- = map (RegReal . RealRegSingle) [oReg i | i <- [0..5]]
+allArgRegs
+ = map (RegReal . RealRegSingle) [oReg i | i <- [0..5]]
--- These are the regs that we cannot assume stay alive over a C call.
--- TODO: Why can we assume that o6 isn't clobbered? -- BL 2009/02
+-- These are the regs that we cannot assume stay alive over a C call.
+-- TODO: Why can we assume that o6 isn't clobbered? -- BL 2009/02
--
callClobberedRegs :: [Reg]
callClobberedRegs
- = map (RegReal . RealRegSingle)
- ( oReg 7 :
- [oReg i | i <- [0..5]] ++
- [gReg i | i <- [1..7]] ++
- [fReg i | i <- [0..31]] )
+ = map (RegReal . RealRegSingle)
+ ( oReg 7 :
+ [oReg i | i <- [0..5]] ++
+ [gReg i | i <- [1..7]] ++
+ [fReg i | i <- [0..31]] )
-- | Make a virtual reg with this size.
mkVirtualReg :: Unique -> Size -> VirtualReg
mkVirtualReg u size
- | not (isFloatSize size)
- = VirtualRegI u
+ | not (isFloatSize size)
+ = VirtualRegI u
- | otherwise
- = case size of
- FF32 -> VirtualRegF u
- FF64 -> VirtualRegD u
- _ -> panic "mkVReg"
+ | otherwise
+ = case size of
+ FF32 -> VirtualRegF u
+ FF64 -> VirtualRegD u
+ _ -> panic "mkVReg"
regDotColor :: RealReg -> SDoc
regDotColor reg
= case classOfRealReg reg of
- RcInteger -> text "blue"
- RcFloat -> text "red"
- _other -> text "green"
-
+ RcInteger -> text "blue"
+ RcFloat -> text "red"
+ _other -> text "green"
diff --git a/compiler/nativeGen/SPARC/ShortcutJump.hs b/compiler/nativeGen/SPARC/ShortcutJump.hs
index 7f978c17c509..123a345130f0 100644
--- a/compiler/nativeGen/SPARC/ShortcutJump.hs
+++ b/compiler/nativeGen/SPARC/ShortcutJump.hs
@@ -1,17 +1,9 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module SPARC.ShortcutJump (
- JumpDest(..), getJumpDestBlockId,
- canShortcut,
- shortcutJump,
- shortcutStatics,
- shortBlockId
+ JumpDest(..), getJumpDestBlockId,
+ canShortcut,
+ shortcutJump,
+ shortcutStatics,
+ shortBlockId
)
where
@@ -28,9 +20,9 @@ import Unique
-data JumpDest
- = DestBlockId BlockId
- | DestImm Imm
+data JumpDest
+ = DestBlockId BlockId
+ | DestImm Imm
getJumpDestBlockId :: JumpDest -> Maybe BlockId
getJumpDestBlockId (DestBlockId bid) = Just bid
@@ -59,9 +51,9 @@ shortcutLabel fn lab
shortcutStatic :: (BlockId -> Maybe JumpDest) -> CmmStatic -> CmmStatic
shortcutStatic fn (CmmStaticLit (CmmLabel lab))
- = CmmStaticLit (CmmLabel (shortcutLabel fn lab))
+ = CmmStaticLit (CmmLabel (shortcutLabel fn lab))
shortcutStatic fn (CmmStaticLit (CmmLabelDiffOff lbl1 lbl2 off))
- = CmmStaticLit (CmmLabelDiffOff (shortcutLabel fn lbl1) lbl2 off)
+ = CmmStaticLit (CmmLabelDiffOff (shortcutLabel fn lbl1) lbl2 off)
-- slightly dodgy, we're ignoring the second label, but this
-- works with the way we use CmmLabelDiffOff for jump tables now.
shortcutStatic _ other_static
@@ -75,6 +67,3 @@ shortBlockId fn blockid =
Just (DestBlockId blockid') -> shortBlockId fn blockid'
Just (DestImm (ImmCLbl lbl)) -> lbl
_other -> panic "shortBlockId"
-
-
-
diff --git a/compiler/nativeGen/SPARC/Stack.hs b/compiler/nativeGen/SPARC/Stack.hs
index 4a6f4c1335d9..629b18789f10 100644
--- a/compiler/nativeGen/SPARC/Stack.hs
+++ b/compiler/nativeGen/SPARC/Stack.hs
@@ -1,16 +1,8 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module SPARC.Stack (
- spRel,
- fpRel,
- spillSlotToOffset,
- maxSpillSlots
+ spRel,
+ fpRel,
+ spillSlotToOffset,
+ maxSpillSlots
)
where
@@ -24,43 +16,42 @@ import DynFlags
import Outputable
-- | Get an AddrMode relative to the address in sp.
--- This gives us a stack relative addressing mode for volatile
--- temporaries and for excess call arguments.
+-- This gives us a stack relative addressing mode for volatile
+-- temporaries and for excess call arguments.
--
-spRel :: Int -- ^ stack offset in words, positive or negative
+spRel :: Int -- ^ stack offset in words, positive or negative
-> AddrMode
-spRel n = AddrRegImm sp (ImmInt (n * wordLength))
+spRel n = AddrRegImm sp (ImmInt (n * wordLength))
-- | Get an address relative to the frame pointer.
--- This doesn't work work for offsets greater than 13 bits; we just hope for the best
+-- This doesn't work work for offsets greater than 13 bits; we just hope for the best
--
fpRel :: Int -> AddrMode
fpRel n
- = AddrRegImm fp (ImmInt (n * wordLength))
+ = AddrRegImm fp (ImmInt (n * wordLength))
-- | Convert a spill slot number to a *byte* offset, with no sign.
--
spillSlotToOffset :: DynFlags -> Int -> Int
spillSlotToOffset dflags slot
- | slot >= 0 && slot < maxSpillSlots dflags
- = 64 + spillSlotSize * slot
+ | slot >= 0 && slot < maxSpillSlots dflags
+ = 64 + spillSlotSize * slot
- | otherwise
- = pprPanic "spillSlotToOffset:"
- ( text "invalid spill location: " <> int slot
- $$ text "maxSpillSlots: " <> int (maxSpillSlots dflags))
+ | otherwise
+ = pprPanic "spillSlotToOffset:"
+ ( text "invalid spill location: " <> int slot
+ $$ text "maxSpillSlots: " <> int (maxSpillSlots dflags))
-- | The maximum number of spill slots available on the C stack.
--- If we use up all of the slots, then we're screwed.
+-- If we use up all of the slots, then we're screwed.
--
--- Why do we reserve 64 bytes, instead of using the whole thing??
--- -- BL 2009/02/15
+-- Why do we reserve 64 bytes, instead of using the whole thing??
+-- -- BL 2009/02/15
--
maxSpillSlots :: DynFlags -> Int
maxSpillSlots dflags
- = ((spillAreaLength dflags - 64) `div` spillSlotSize) - 1
-
+ = ((spillAreaLength dflags - 64) `div` spillSlotSize) - 1
diff --git a/compiler/nativeGen/Size.hs b/compiler/nativeGen/Size.hs
index 45a39645cc2b..8fe590f1e94a 100644
--- a/compiler/nativeGen/Size.hs
+++ b/compiler/nativeGen/Size.hs
@@ -1,22 +1,15 @@
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
-- | Sizes on this architecture
--- A Size is a combination of width and class
---
--- TODO: Rename this to "Format" instead of "Size" to reflect
--- the fact that it represents floating point vs integer.
+-- A Size is a combination of width and class
+--
+-- TODO: Rename this to "Format" instead of "Size" to reflect
+-- the fact that it represents floating point vs integer.
--
--- TODO: Signed vs unsigned?
+-- TODO: Signed vs unsigned?
--
--- TODO: This module is currenly shared by all architectures because
--- NCGMonad need to know about it to make a VReg. It would be better
--- to have architecture specific formats, and do the overloading
--- properly. eg SPARC doesn't care about FF80.
+-- TODO: This module is currenly shared by all architectures because
+-- NCGMonad need to know about it to make a VReg. It would be better
+-- to have architecture specific formats, and do the overloading
+-- properly. eg SPARC doesn't care about FF80.
--
module Size (
Size(..),
@@ -37,76 +30,76 @@ import Outputable
-- significance, here in the native code generator. You can change it
-- without global consequences.
--
--- A major use is as an opcode qualifier; thus the opcode
--- mov.l a b
--- might be encoded
--- MOV II32 a b
+-- A major use is as an opcode qualifier; thus the opcode
+-- mov.l a b
+-- might be encoded
+-- MOV II32 a b
-- where the Size field encodes the ".l" part.
-- ToDo: it's not clear to me that we need separate signed-vs-unsigned sizes
--- here. I've removed them from the x86 version, we'll see what happens --SDM
+-- here. I've removed them from the x86 version, we'll see what happens --SDM
-- ToDo: quite a few occurrences of Size could usefully be replaced by Width
data Size
- = II8
- | II16
- | II32
- | II64
- | FF32
- | FF64
- | FF80
- deriving (Show, Eq)
+ = II8
+ | II16
+ | II32
+ | II64
+ | FF32
+ | FF64
+ | FF80
+ deriving (Show, Eq)
-- | Get the integer size of this width.
intSize :: Width -> Size
intSize width
= case width of
- W8 -> II8
- W16 -> II16
- W32 -> II32
- W64 -> II64
- other -> pprPanic "Size.intSize" (ppr other)
+ W8 -> II8
+ W16 -> II16
+ W32 -> II32
+ W64 -> II64
+ other -> pprPanic "Size.intSize" (ppr other)
-- | Get the float size of this width.
floatSize :: Width -> Size
floatSize width
= case width of
- W32 -> FF32
- W64 -> FF64
- other -> pprPanic "Size.floatSize" (ppr other)
+ W32 -> FF32
+ W64 -> FF64
+ other -> pprPanic "Size.floatSize" (ppr other)
-- | Check if a size represents a floating point value.
isFloatSize :: Size -> Bool
isFloatSize size
= case size of
- FF32 -> True
- FF64 -> True
- FF80 -> True
- _ -> False
+ FF32 -> True
+ FF64 -> True
+ FF80 -> True
+ _ -> False
-- | Convert a Cmm type to a Size.
cmmTypeSize :: CmmType -> Size
-cmmTypeSize ty
- | isFloatType ty = floatSize (typeWidth ty)
- | otherwise = intSize (typeWidth ty)
+cmmTypeSize ty
+ | isFloatType ty = floatSize (typeWidth ty)
+ | otherwise = intSize (typeWidth ty)
-- | Get the Width of a Size.
sizeToWidth :: Size -> Width
sizeToWidth size
= case size of
- II8 -> W8
- II16 -> W16
- II32 -> W32
- II64 -> W64
- FF32 -> W32
- FF64 -> W64
- FF80 -> W80
+ II8 -> W8
+ II16 -> W16
+ II32 -> W32
+ II64 -> W64
+ FF32 -> W32
+ FF64 -> W64
+ FF80 -> W80
sizeInBytes :: Size -> Int
sizeInBytes = widthInBytes . sizeToWidth
diff --git a/compiler/nativeGen/TargetReg.hs b/compiler/nativeGen/TargetReg.hs
index 09c774f4d575..96c17777950d 100644
--- a/compiler/nativeGen/TargetReg.hs
+++ b/compiler/nativeGen/TargetReg.hs
@@ -1,28 +1,20 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
+{-# LANGUAGE CPP #-}
-- | Hard wired things related to registers.
--- This is module is preventing the native code generator being able to
--- emit code for non-host architectures.
+-- This is module is preventing the native code generator being able to
+-- emit code for non-host architectures.
--
--- TODO: Do a better job of the overloading, and eliminate this module.
--- We'd probably do better with a Register type class, and hook this to
--- Instruction somehow.
+-- TODO: Do a better job of the overloading, and eliminate this module.
+-- We'd probably do better with a Register type class, and hook this to
+-- Instruction somehow.
--
--- TODO: We should also make arch specific versions of RegAlloc.Graph.TrivColorable
-
+-- TODO: We should also make arch specific versions of RegAlloc.Graph.TrivColorable
module TargetReg (
- targetVirtualRegSqueeze,
- targetRealRegSqueeze,
- targetClassOfRealReg,
- targetMkVirtualReg,
- targetRegDotColor,
- targetClassOfReg
+ targetVirtualRegSqueeze,
+ targetRealRegSqueeze,
+ targetClassOfRealReg,
+ targetMkVirtualReg,
+ targetRegDotColor,
+ targetClassOfReg
)
where
@@ -54,6 +46,7 @@ targetVirtualRegSqueeze platform
ArchSPARC -> SPARC.virtualRegSqueeze
ArchPPC_64 -> panic "targetVirtualRegSqueeze ArchPPC_64"
ArchARM _ _ _ -> panic "targetVirtualRegSqueeze ArchARM"
+ ArchARM64 -> panic "targetVirtualRegSqueeze ArchARM64"
ArchAlpha -> panic "targetVirtualRegSqueeze ArchAlpha"
ArchMipseb -> panic "targetVirtualRegSqueeze ArchMipseb"
ArchMipsel -> panic "targetVirtualRegSqueeze ArchMipsel"
@@ -70,6 +63,7 @@ targetRealRegSqueeze platform
ArchSPARC -> SPARC.realRegSqueeze
ArchPPC_64 -> panic "targetRealRegSqueeze ArchPPC_64"
ArchARM _ _ _ -> panic "targetRealRegSqueeze ArchARM"
+ ArchARM64 -> panic "targetRealRegSqueeze ArchARM64"
ArchAlpha -> panic "targetRealRegSqueeze ArchAlpha"
ArchMipseb -> panic "targetRealRegSqueeze ArchMipseb"
ArchMipsel -> panic "targetRealRegSqueeze ArchMipsel"
@@ -85,6 +79,7 @@ targetClassOfRealReg platform
ArchSPARC -> SPARC.classOfRealReg
ArchPPC_64 -> panic "targetClassOfRealReg ArchPPC_64"
ArchARM _ _ _ -> panic "targetClassOfRealReg ArchARM"
+ ArchARM64 -> panic "targetClassOfRealReg ArchARM64"
ArchAlpha -> panic "targetClassOfRealReg ArchAlpha"
ArchMipseb -> panic "targetClassOfRealReg ArchMipseb"
ArchMipsel -> panic "targetClassOfRealReg ArchMipsel"
@@ -100,6 +95,7 @@ targetMkVirtualReg platform
ArchSPARC -> SPARC.mkVirtualReg
ArchPPC_64 -> panic "targetMkVirtualReg ArchPPC_64"
ArchARM _ _ _ -> panic "targetMkVirtualReg ArchARM"
+ ArchARM64 -> panic "targetMkVirtualReg ArchARM64"
ArchAlpha -> panic "targetMkVirtualReg ArchAlpha"
ArchMipseb -> panic "targetMkVirtualReg ArchMipseb"
ArchMipsel -> panic "targetMkVirtualReg ArchMipsel"
@@ -115,6 +111,7 @@ targetRegDotColor platform
ArchSPARC -> SPARC.regDotColor
ArchPPC_64 -> panic "targetRegDotColor ArchPPC_64"
ArchARM _ _ _ -> panic "targetRegDotColor ArchARM"
+ ArchARM64 -> panic "targetRegDotColor ArchARM64"
ArchAlpha -> panic "targetRegDotColor ArchAlpha"
ArchMipseb -> panic "targetRegDotColor ArchMipseb"
ArchMipsel -> panic "targetRegDotColor ArchMipsel"
@@ -127,5 +124,3 @@ targetClassOfReg platform reg
= case reg of
RegVirtual vr -> classOfVirtualReg vr
RegReal rr -> targetClassOfRealReg platform rr
-
-
diff --git a/compiler/nativeGen/X86/CodeGen.hs b/compiler/nativeGen/X86/CodeGen.hs
index 2456688744dd..d95a14c31cc0 100644
--- a/compiler/nativeGen/X86/CodeGen.hs
+++ b/compiler/nativeGen/X86/CodeGen.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, GADTs, NondecreasingIndentation #-}
+
-----------------------------------------------------------------------------
--
-- Generating machine code (instruction selection)
@@ -10,7 +12,6 @@
-- (a) the sectioning, and (b) the type signatures, the
-- structure should not be too overwhelming.
-{-# LANGUAGE GADTs #-}
module X86.CodeGen (
cmmTopCodeGen,
generateJumpTableForInstr,
@@ -30,6 +31,7 @@ import X86.Regs
import X86.RegInfo
import CodeGen.Platform
import CPrim
+import Debug ( DebugBlock(..) )
import Instruction
import PIC
import NCGMonad
@@ -40,12 +42,14 @@ import Platform
-- Our intermediate code:
import BasicTypes
import BlockId
-import Module ( primPackageId )
+import Module ( primPackageKey )
import PprCmm ()
import CmmUtils
import Cmm
import Hoopl
import CLabel
+import CoreSyn ( Tickish(..) )
+import SrcLoc ( srcSpanFile, srcSpanStartLine, srcSpanStartCol )
-- The rest:
import ForeignCall ( CCallConv(..) )
@@ -110,11 +114,19 @@ basicBlockCodeGen
, [NatCmmDecl (Alignment, CmmStatics) Instr])
basicBlockCodeGen block = do
- let (CmmEntry id, nodes, tail) = blockSplit block
+ let (_, nodes, tail) = blockSplit block
+ id = entryLabel block
stmts = blockToList nodes
+ -- Generate location directive
+ dbg <- getDebugBlock (entryLabel block)
+ loc_instrs <- case dblSourceTick =<< dbg of
+ Just (SourceNote span name)
+ -> do fileId <- getFileId (srcSpanFile span)
+ return $ unitOL $ LOCATION fileId (srcSpanStartLine span) (srcSpanStartCol span) name
+ _ -> return nilOL
mid_instrs <- stmtsToInstrs stmts
tail_instrs <- stmtToInstrs tail
- let instrs = mid_instrs `appOL` tail_instrs
+ let instrs = loc_instrs `appOL` mid_instrs `appOL` tail_instrs
-- code generation may introduce new basic block boundaries, which
-- are indicated by the NEWBLOCK instruction. We must split up the
-- instruction stream into basic blocks again. Also, we extract
@@ -143,6 +155,8 @@ stmtToInstrs stmt = do
is32Bit <- is32BitPlatform
case stmt of
CmmComment s -> return (unitOL (COMMENT s))
+ CmmTick {} -> return nilOL
+ CmmUnwind {} -> return nilOL
CmmAssign reg src
| isFloatType ty -> assignReg_FltCode size reg src
@@ -159,7 +173,7 @@ stmtToInstrs stmt = do
size = cmmTypeSize ty
CmmUnsafeForeignCall target result_regs args
- -> genCCall is32Bit target result_regs args
+ -> genCCall dflags is32Bit target result_regs args
CmmBranch id -> genBranch id
CmmCondBranch arg true false -> do b1 <- genCondJump true arg
@@ -804,6 +818,8 @@ getRegister' _ is32Bit (CmmMachOp mop [x, y]) = do -- dyadic MachOps
| is32BitInteger y = add_int rep x y
add_code rep x y = trivialCode rep (ADD size) (Just (ADD size)) x y
where size = intSize rep
+ -- TODO: There are other interesting patterns we want to replace
+ -- with a LEA, e.g. `(x + offset) + (y << shift)`.
--------------------
sub_code :: Width -> CmmExpr -> CmmExpr -> NatM Register
@@ -1024,6 +1040,13 @@ getAmode' is32Bit (CmmMachOp (MO_Add rep) [a@(CmmMachOp (MO_Shl _) _),
b@(CmmLit _)])
= getAmode' is32Bit (CmmMachOp (MO_Add rep) [b,a])
+-- Matches: (x + offset) + (y << shift)
+getAmode' _ (CmmMachOp (MO_Add _) [CmmRegOff x offset,
+ CmmMachOp (MO_Shl _)
+ [y, CmmLit (CmmInt shift _)]])
+ | shift == 0 || shift == 1 || shift == 2 || shift == 3
+ = x86_complex_amode (CmmReg x) y shift (fromIntegral offset)
+
getAmode' _ (CmmMachOp (MO_Add _) [x, CmmMachOp (MO_Shl _)
[y, CmmLit (CmmInt shift _)]])
| shift == 0 || shift == 1 || shift == 2 || shift == 3
@@ -1047,6 +1070,18 @@ getAmode' _ expr = do
(reg,code) <- getSomeReg expr
return (Amode (AddrBaseIndex (EABaseReg reg) EAIndexNone (ImmInt 0)) code)
+-- | Like 'getAmode', but on 32-bit use simple register addressing
+-- (i.e. no index register). This stops us from running out of
+-- registers on x86 when using instructions such as cmpxchg, which can
+-- use up to three virtual registers and one fixed register.
+getSimpleAmode :: DynFlags -> Bool -> CmmExpr -> NatM Amode
+getSimpleAmode dflags is32Bit addr
+ | is32Bit = do
+ addr_code <- getAnyReg addr
+ addr_r <- getNewRegNat (intSize (wordWidth dflags))
+ let amode = AddrBaseIndex (EABaseReg addr_r) EAIndexNone (ImmInt 0)
+ return $! Amode amode (addr_code addr_r)
+ | otherwise = getAmode addr
x86_complex_amode :: CmmExpr -> CmmExpr -> Integer -> Integer -> NatM Amode
x86_complex_amode base index shift offset
@@ -1559,7 +1594,8 @@ genCondJump id bool = do
-- register allocator.
genCCall
- :: Bool -- 32 bit platform?
+ :: DynFlags
+ -> Bool -- 32 bit platform?
-> ForeignTarget -- function to call
-> [CmmFormal] -- where to put the result
-> [CmmActual] -- arguments (of mixed type)
@@ -1570,21 +1606,27 @@ genCCall
-- Unroll memcpy calls if the source and destination pointers are at
-- least DWORD aligned and the number of bytes to copy isn't too
-- large. Otherwise, call C's memcpy.
-genCCall is32Bit (PrimTarget MO_Memcpy) _
+genCCall dflags is32Bit (PrimTarget MO_Memcpy) _
[dst, src,
(CmmLit (CmmInt n _)),
(CmmLit (CmmInt align _))]
- | n <= maxInlineSizeThreshold && align .&. 3 == 0 = do
+ | fromInteger insns <= maxInlineMemcpyInsns dflags && align .&. 3 == 0 = do
code_dst <- getAnyReg dst
dst_r <- getNewRegNat size
code_src <- getAnyReg src
src_r <- getNewRegNat size
tmp_r <- getNewRegNat size
return $ code_dst dst_r `appOL` code_src src_r `appOL`
- go dst_r src_r tmp_r n
+ go dst_r src_r tmp_r (fromInteger n)
where
+ -- The number of instructions we will generate (approx). We need 2
+ -- instructions per move.
+ insns = 2 * ((n + sizeBytes - 1) `div` sizeBytes)
+
size = if align .&. 4 /= 0 then II32 else (archWordSize is32Bit)
+ -- The size of each move, in bytes.
+ sizeBytes :: Integer
sizeBytes = fromIntegral (sizeInBytes size)
go :: Reg -> Reg -> Reg -> Integer -> OrdList Instr
@@ -1613,15 +1655,15 @@ genCCall is32Bit (PrimTarget MO_Memcpy) _
dst_addr = AddrBaseIndex (EABaseReg dst) EAIndexNone
(ImmInteger (n - i))
-genCCall _ (PrimTarget MO_Memset) _
+genCCall dflags _ (PrimTarget MO_Memset) _
[dst,
CmmLit (CmmInt c _),
CmmLit (CmmInt n _),
CmmLit (CmmInt align _)]
- | n <= maxInlineSizeThreshold && align .&. 3 == 0 = do
+ | fromInteger insns <= maxInlineMemsetInsns dflags && align .&. 3 == 0 = do
code_dst <- getAnyReg dst
dst_r <- getNewRegNat size
- return $ code_dst dst_r `appOL` go dst_r n
+ return $ code_dst dst_r `appOL` go dst_r (fromInteger n)
where
(size, val) = case align .&. 3 of
2 -> (II16, c2)
@@ -1630,6 +1672,12 @@ genCCall _ (PrimTarget MO_Memset) _
c2 = c `shiftL` 8 .|. c
c4 = c2 `shiftL` 16 .|. c2
+ -- The number of instructions we will generate (approx). We need 1
+ -- instructions per move.
+ insns = (n + sizeBytes - 1) `div` sizeBytes
+
+ -- The size of each move, in bytes.
+ sizeBytes :: Integer
sizeBytes = fromIntegral (sizeInBytes size)
go :: Reg -> Integer -> OrdList Instr
@@ -1652,13 +1700,13 @@ genCCall _ (PrimTarget MO_Memset) _
dst_addr = AddrBaseIndex (EABaseReg dst) EAIndexNone
(ImmInteger (n - i))
-genCCall _ (PrimTarget MO_WriteBarrier) _ _ = return nilOL
+genCCall _ _ (PrimTarget MO_WriteBarrier) _ _ = return nilOL
-- write barrier compiles to no code on x86/x86-64;
-- we keep it this long in order to prevent earlier optimisations.
-genCCall _ (PrimTarget MO_Touch) _ _ = return nilOL
+genCCall _ _ (PrimTarget MO_Touch) _ _ = return nilOL
-genCCall is32bit (PrimTarget (MO_Prefetch_Data n )) _ [src] =
+genCCall _ is32bit (PrimTarget (MO_Prefetch_Data n )) _ [src] =
case n of
0 -> genPrefetch src $ PREFETCH NTA size
1 -> genPrefetch src $ PREFETCH Lvl2 size
@@ -1679,8 +1727,7 @@ genCCall is32bit (PrimTarget (MO_Prefetch_Data n )) _ [src] =
((AddrBaseIndex (EABaseReg src_r ) EAIndexNone (ImmInt 0)))) ))
-- prefetch always takes an address
-genCCall is32Bit (PrimTarget (MO_BSwap width)) [dst] [src] = do
- dflags <- getDynFlags
+genCCall dflags is32Bit (PrimTarget (MO_BSwap width)) [dst] [src] = do
let platform = targetPlatform dflags
let dst_r = getRegisterReg platform False (CmmLocal dst)
case width of
@@ -1702,54 +1749,201 @@ genCCall is32Bit (PrimTarget (MO_BSwap width)) [dst] [src] = do
where
size = intSize width
-genCCall is32Bit (PrimTarget (MO_PopCnt width)) dest_regs@[dst]
+genCCall dflags is32Bit (PrimTarget (MO_PopCnt width)) dest_regs@[dst]
args@[src] = do
sse4_2 <- sse4_2Enabled
- dflags <- getDynFlags
let platform = targetPlatform dflags
if sse4_2
then do code_src <- getAnyReg src
src_r <- getNewRegNat size
+ let dst_r = getRegisterReg platform False (CmmLocal dst)
return $ code_src src_r `appOL`
(if width == W8 then
-- The POPCNT instruction doesn't take a r/m8
unitOL (MOVZxL II8 (OpReg src_r) (OpReg src_r)) `appOL`
- unitOL (POPCNT II16 (OpReg src_r)
- (getRegisterReg platform False (CmmLocal dst)))
+ unitOL (POPCNT II16 (OpReg src_r) dst_r)
else
- unitOL (POPCNT size (OpReg src_r)
- (getRegisterReg platform False (CmmLocal dst))))
+ unitOL (POPCNT size (OpReg src_r) dst_r)) `appOL`
+ (if width == W8 || width == W16 then
+ -- We used a 16-bit destination register above,
+ -- so zero-extend
+ unitOL (MOVZxL II16 (OpReg dst_r) (OpReg dst_r))
+ else nilOL)
else do
targetExpr <- cmmMakeDynamicReference dflags
CallReference lbl
let target = ForeignTarget targetExpr (ForeignConvention CCallConv
[NoHint] [NoHint]
CmmMayReturn)
- genCCall is32Bit target dest_regs args
+ genCCall dflags is32Bit target dest_regs args
where
size = intSize width
- lbl = mkCmmCodeLabel primPackageId (fsLit (popCntLabel width))
+ lbl = mkCmmCodeLabel primPackageKey (fsLit (popCntLabel width))
-genCCall is32Bit (PrimTarget (MO_UF_Conv width)) dest_regs args = do
- dflags <- getDynFlags
+genCCall dflags is32Bit (PrimTarget (MO_Clz width)) dest_regs@[dst] args@[src]
+ | is32Bit && width == W64 = do
+ -- Fallback to `hs_clz64` on i386
+ targetExpr <- cmmMakeDynamicReference dflags CallReference lbl
+ let target = ForeignTarget targetExpr (ForeignConvention CCallConv
+ [NoHint] [NoHint]
+ CmmMayReturn)
+ genCCall dflags is32Bit target dest_regs args
+
+ | otherwise = do
+ code_src <- getAnyReg src
+ src_r <- getNewRegNat size
+ tmp_r <- getNewRegNat size
+ let dst_r = getRegisterReg platform False (CmmLocal dst)
+
+ -- The following insn sequence makes sure 'clz 0' has a defined value.
+ -- starting with Haswell, one could use the LZCNT insn instead.
+ return $ code_src src_r `appOL` toOL
+ ([ MOVZxL II8 (OpReg src_r) (OpReg src_r) | width == W8 ] ++
+ [ BSR size (OpReg src_r) tmp_r
+ , MOV II32 (OpImm (ImmInt (2*bw-1))) (OpReg dst_r)
+ , CMOV NE size (OpReg tmp_r) dst_r
+ , XOR size (OpImm (ImmInt (bw-1))) (OpReg dst_r)
+ ]) -- NB: We don't need to zero-extend the result for the
+ -- W8/W16 cases because the 'MOV' insn already
+ -- took care of implicitly clearing the upper bits
+ where
+ bw = widthInBits width
+ platform = targetPlatform dflags
+ size = if width == W8 then II16 else intSize width
+ lbl = mkCmmCodeLabel primPackageKey (fsLit (clzLabel width))
+
+genCCall dflags is32Bit (PrimTarget (MO_Ctz width)) dest_regs@[dst] args@[src]
+ | is32Bit, width == W64 = do
+ -- Fallback to `hs_ctz64` on i386
+ targetExpr <- cmmMakeDynamicReference dflags CallReference lbl
+ let target = ForeignTarget targetExpr (ForeignConvention CCallConv
+ [NoHint] [NoHint]
+ CmmMayReturn)
+ genCCall dflags is32Bit target dest_regs args
+
+ | otherwise = do
+ code_src <- getAnyReg src
+ src_r <- getNewRegNat size
+ tmp_r <- getNewRegNat size
+ let dst_r = getRegisterReg platform False (CmmLocal dst)
+
+ -- The following insn sequence makes sure 'ctz 0' has a defined value.
+ -- starting with Haswell, one could use the TZCNT insn instead.
+ return $ code_src src_r `appOL` toOL
+ ([ MOVZxL II8 (OpReg src_r) (OpReg src_r) | width == W8 ] ++
+ [ BSF size (OpReg src_r) tmp_r
+ , MOV II32 (OpImm (ImmInt bw)) (OpReg dst_r)
+ , CMOV NE size (OpReg tmp_r) dst_r
+ ]) -- NB: We don't need to zero-extend the result for the
+ -- W8/W16 cases because the 'MOV' insn already
+ -- took care of implicitly clearing the upper bits
+ where
+ bw = widthInBits width
+ platform = targetPlatform dflags
+ size = if width == W8 then II16 else intSize width
+ lbl = mkCmmCodeLabel primPackageKey (fsLit (ctzLabel width))
+
+genCCall dflags is32Bit (PrimTarget (MO_UF_Conv width)) dest_regs args = do
targetExpr <- cmmMakeDynamicReference dflags
CallReference lbl
let target = ForeignTarget targetExpr (ForeignConvention CCallConv
[NoHint] [NoHint]
CmmMayReturn)
- genCCall is32Bit target dest_regs args
+ genCCall dflags is32Bit target dest_regs args
+ where
+ lbl = mkCmmCodeLabel primPackageKey (fsLit (word2FloatLabel width))
+
+genCCall dflags is32Bit (PrimTarget (MO_AtomicRMW width amop)) [dst] [addr, n] = do
+ Amode amode addr_code <-
+ if amop `elem` [AMO_Add, AMO_Sub]
+ then getAmode addr
+ else getSimpleAmode dflags is32Bit addr -- See genCCall for MO_Cmpxchg
+ arg <- getNewRegNat size
+ arg_code <- getAnyReg n
+ use_sse2 <- sse2Enabled
+ let platform = targetPlatform dflags
+ dst_r = getRegisterReg platform use_sse2 (CmmLocal dst)
+ code <- op_code dst_r arg amode
+ return $ addr_code `appOL` arg_code arg `appOL` code
where
- lbl = mkCmmCodeLabel primPackageId (fsLit (word2FloatLabel width))
+ -- Code for the operation
+ op_code :: Reg -- Destination reg
+ -> Reg -- Register containing argument
+ -> AddrMode -- Address of location to mutate
+ -> NatM (OrdList Instr)
+ op_code dst_r arg amode = case amop of
+ -- In the common case where dst_r is a virtual register the
+ -- final move should go away, because it's the last use of arg
+ -- and the first use of dst_r.
+ AMO_Add -> return $ toOL [ LOCK (XADD size (OpReg arg) (OpAddr amode))
+ , MOV size (OpReg arg) (OpReg dst_r)
+ ]
+ AMO_Sub -> return $ toOL [ NEGI size (OpReg arg)
+ , LOCK (XADD size (OpReg arg) (OpAddr amode))
+ , MOV size (OpReg arg) (OpReg dst_r)
+ ]
+ AMO_And -> cmpxchg_code (\ src dst -> unitOL $ AND size src dst)
+ AMO_Nand -> cmpxchg_code (\ src dst -> toOL [ AND size src dst
+ , NOT size dst
+ ])
+ AMO_Or -> cmpxchg_code (\ src dst -> unitOL $ OR size src dst)
+ AMO_Xor -> cmpxchg_code (\ src dst -> unitOL $ XOR size src dst)
+ where
+ -- Simulate operation that lacks a dedicated instruction using
+ -- cmpxchg.
+ cmpxchg_code :: (Operand -> Operand -> OrdList Instr)
+ -> NatM (OrdList Instr)
+ cmpxchg_code instrs = do
+ lbl <- getBlockIdNat
+ tmp <- getNewRegNat size
+ return $ toOL
+ [ MOV size (OpAddr amode) (OpReg eax)
+ , JXX ALWAYS lbl
+ , NEWBLOCK lbl
+ -- Keep old value so we can return it:
+ , MOV size (OpReg eax) (OpReg dst_r)
+ , MOV size (OpReg eax) (OpReg tmp)
+ ]
+ `appOL` instrs (OpReg arg) (OpReg tmp) `appOL` toOL
+ [ LOCK (CMPXCHG size (OpReg tmp) (OpAddr amode))
+ , JXX NE lbl
+ ]
-genCCall is32Bit target dest_regs args
- | is32Bit = genCCall32 target dest_regs args
- | otherwise = genCCall64 target dest_regs args
+ size = intSize width
-genCCall32 :: ForeignTarget -- function to call
- -> [CmmFormal] -- where to put the result
- -> [CmmActual] -- arguments (of mixed type)
- -> NatM InstrBlock
-genCCall32 target dest_regs args = do
+genCCall dflags _ (PrimTarget (MO_AtomicRead width)) [dst] [addr] = do
+ load_code <- intLoadCode (MOV (intSize width)) addr
+ let platform = targetPlatform dflags
+ use_sse2 <- sse2Enabled
+ return (load_code (getRegisterReg platform use_sse2 (CmmLocal dst)))
+
+genCCall _ _ (PrimTarget (MO_AtomicWrite width)) [] [addr, val] = do
+ code <- assignMem_IntCode (intSize width) addr val
+ return $ code `snocOL` MFENCE
+
+genCCall dflags is32Bit (PrimTarget (MO_Cmpxchg width)) [dst] [addr, old, new] = do
+ -- On x86 we don't have enough registers to use cmpxchg with a
+ -- complicated addressing mode, so on that architecture we
+ -- pre-compute the address first.
+ Amode amode addr_code <- getSimpleAmode dflags is32Bit addr
+ newval <- getNewRegNat size
+ newval_code <- getAnyReg new
+ oldval <- getNewRegNat size
+ oldval_code <- getAnyReg old
+ use_sse2 <- sse2Enabled
+ let platform = targetPlatform dflags
+ dst_r = getRegisterReg platform use_sse2 (CmmLocal dst)
+ code = toOL
+ [ MOV size (OpReg oldval) (OpReg eax)
+ , LOCK (CMPXCHG size (OpReg newval) (OpAddr amode))
+ , MOV size (OpReg eax) (OpReg dst_r)
+ ]
+ return $ addr_code `appOL` newval_code newval `appOL` oldval_code oldval
+ `appOL` code
+ where
+ size = intSize width
+
+genCCall _ is32Bit target dest_regs args = do
dflags <- getDynFlags
let platform = targetPlatform dflags
case (target, dest_regs) of
@@ -1757,7 +1951,9 @@ genCCall32 target dest_regs args = do
(PrimTarget op, []) ->
outOfLineCmmOp op Nothing args
-- we only cope with a single result for foreign calls
- (PrimTarget op, [r]) -> do
+ (PrimTarget op, [r])
+ | not is32Bit -> outOfLineCmmOp op (Just r) args
+ | otherwise -> do
l1 <- getNewLabelNat
l2 <- getNewLabelNat
sse2 <- sse2Enabled
@@ -1786,7 +1982,7 @@ genCCall32 target dest_regs args = do
return (any (getRegisterReg platform False (CmmLocal r)))
actuallyInlineFloatOp _ _ args
- = panic $ "genCCall32.actuallyInlineFloatOp: bad number of arguments! ("
+ = panic $ "genCCall.actuallyInlineFloatOp: bad number of arguments! ("
++ show (length args) ++ ")"
(PrimTarget (MO_S_QuotRem width), _) -> divOp1 platform True width dest_regs args
@@ -1796,15 +1992,16 @@ genCCall32 target dest_regs args = do
case args of
[arg_x, arg_y] ->
do hCode <- getAnyReg (CmmLit (CmmInt 0 width))
- lCode <- getAnyReg (CmmMachOp (MO_Add width) [arg_x, arg_y])
let size = intSize width
- reg_l = getRegisterReg platform True (CmmLocal res_l)
+ lCode <- anyReg =<< trivialCode width (ADD_CC size)
+ (Just (ADD_CC size)) arg_x arg_y
+ let reg_l = getRegisterReg platform True (CmmLocal res_l)
reg_h = getRegisterReg platform True (CmmLocal res_h)
code = hCode reg_h `appOL`
lCode reg_l `snocOL`
ADC size (OpImm (ImmInteger 0)) (OpReg reg_h)
return code
- _ -> panic "genCCall32: Wrong number of arguments/results for add2"
+ _ -> panic "genCCall: Wrong number of arguments/results for add2"
(PrimTarget (MO_U_Mul2 width), [res_h, res_l]) ->
case args of
[arg_x, arg_y] ->
@@ -1819,18 +2016,20 @@ genCCall32 target dest_regs args = do
MOV size (OpReg rdx) (OpReg reg_h),
MOV size (OpReg rax) (OpReg reg_l)]
return code
- _ -> panic "genCCall32: Wrong number of arguments/results for add2"
+ _ -> panic "genCCall: Wrong number of arguments/results for add2"
- _ -> genCCall32' dflags target dest_regs args
+ _ -> if is32Bit
+ then genCCall32' dflags target dest_regs args
+ else genCCall64' dflags target dest_regs args
where divOp1 platform signed width results [arg_x, arg_y]
= divOp platform signed width results Nothing arg_x arg_y
divOp1 _ _ _ _ _
- = panic "genCCall32: Wrong number of arguments for divOp1"
+ = panic "genCCall: Wrong number of arguments for divOp1"
divOp2 platform signed width results [arg_x_high, arg_x_low, arg_y]
= divOp platform signed width results (Just arg_x_high) arg_x_low arg_y
divOp2 _ _ _ _ _
- = panic "genCCall64: Wrong number of arguments for divOp2"
+ = panic "genCCall: Wrong number of arguments for divOp2"
divOp platform signed width [res_q, res_r]
m_arg_x_high arg_x_low arg_y
= do let size = intSize width
@@ -1854,7 +2053,7 @@ genCCall32 target dest_regs args = do
MOV size (OpReg rax) (OpReg reg_q),
MOV size (OpReg rdx) (OpReg reg_r)]
divOp _ _ _ _ _ _ _
- = panic "genCCall32: Wrong number of results for divOp"
+ = panic "genCCall: Wrong number of results for divOp"
genCCall32' :: DynFlags
-> ForeignTarget -- function to call
@@ -2009,90 +2208,6 @@ genCCall32' dflags target dest_regs args = do
arg_ty = cmmExprType dflags arg
size = arg_size arg_ty -- Byte size
-genCCall64 :: ForeignTarget -- function to call
- -> [CmmFormal] -- where to put the result
- -> [CmmActual] -- arguments (of mixed type)
- -> NatM InstrBlock
-genCCall64 target dest_regs args = do
- dflags <- getDynFlags
- let platform = targetPlatform dflags
- case (target, dest_regs) of
-
- (PrimTarget op, []) ->
- -- void return type prim op
- outOfLineCmmOp op Nothing args
-
- (PrimTarget op, [res]) ->
- -- we only cope with a single result for foreign calls
- outOfLineCmmOp op (Just res) args
-
- (PrimTarget (MO_S_QuotRem width), _) -> divOp1 platform True width dest_regs args
- (PrimTarget (MO_U_QuotRem width), _) -> divOp1 platform False width dest_regs args
- (PrimTarget (MO_U_QuotRem2 width), _) -> divOp2 platform False width dest_regs args
- (PrimTarget (MO_Add2 width), [res_h, res_l]) ->
- case args of
- [arg_x, arg_y] ->
- do hCode <- getAnyReg (CmmLit (CmmInt 0 width))
- lCode <- getAnyReg (CmmMachOp (MO_Add width) [arg_x, arg_y])
- let size = intSize width
- reg_l = getRegisterReg platform True (CmmLocal res_l)
- reg_h = getRegisterReg platform True (CmmLocal res_h)
- code = hCode reg_h `appOL`
- lCode reg_l `snocOL`
- ADC size (OpImm (ImmInteger 0)) (OpReg reg_h)
- return code
- _ -> panic "genCCall64: Wrong number of arguments/results for add2"
- (PrimTarget (MO_U_Mul2 width), [res_h, res_l]) ->
- case args of
- [arg_x, arg_y] ->
- do (y_reg, y_code) <- getRegOrMem arg_y
- x_code <- getAnyReg arg_x
- let size = intSize width
- reg_h = getRegisterReg platform True (CmmLocal res_h)
- reg_l = getRegisterReg platform True (CmmLocal res_l)
- code = y_code `appOL`
- x_code rax `appOL`
- toOL [MUL2 size y_reg,
- MOV size (OpReg rdx) (OpReg reg_h),
- MOV size (OpReg rax) (OpReg reg_l)]
- return code
- _ -> panic "genCCall64: Wrong number of arguments/results for add2"
-
- _ ->
- do dflags <- getDynFlags
- genCCall64' dflags target dest_regs args
-
- where divOp1 platform signed width results [arg_x, arg_y]
- = divOp platform signed width results Nothing arg_x arg_y
- divOp1 _ _ _ _ _
- = panic "genCCall64: Wrong number of arguments for divOp1"
- divOp2 platform signed width results [arg_x_high, arg_x_low, arg_y]
- = divOp platform signed width results (Just arg_x_high) arg_x_low arg_y
- divOp2 _ _ _ _ _
- = panic "genCCall64: Wrong number of arguments for divOp2"
- divOp platform signed width [res_q, res_r]
- m_arg_x_high arg_x_low arg_y
- = do let size = intSize width
- reg_q = getRegisterReg platform True (CmmLocal res_q)
- reg_r = getRegisterReg platform True (CmmLocal res_r)
- widen | signed = CLTD size
- | otherwise = XOR size (OpReg rdx) (OpReg rdx)
- instr | signed = IDIV
- | otherwise = DIV
- (y_reg, y_code) <- getRegOrMem arg_y
- x_low_code <- getAnyReg arg_x_low
- x_high_code <- case m_arg_x_high of
- Just arg_x_high -> getAnyReg arg_x_high
- Nothing -> return $ const $ unitOL widen
- return $ y_code `appOL`
- x_low_code rax `appOL`
- x_high_code rdx `appOL`
- toOL [instr size y_reg,
- MOV size (OpReg rax) (OpReg reg_q),
- MOV size (OpReg rdx) (OpReg reg_r)]
- divOp _ _ _ _ _ _ _
- = panic "genCCall64: Wrong number of results for divOp"
-
genCCall64' :: DynFlags
-> ForeignTarget -- function to call
-> [CmmFormal] -- where to put the result
@@ -2304,12 +2419,6 @@ maybePromoteCArg dflags wto arg
where
wfrom = cmmExprWidth dflags arg
--- | We're willing to inline and unroll memcpy/memset calls that touch
--- at most these many bytes. This threshold is the same as the one
--- used by GCC and LLVM.
-maxInlineSizeThreshold :: Integer
-maxInlineSizeThreshold = 128
-
outOfLineCmmOp :: CallishMachOp -> Maybe CmmFormal -> [CmmActual] -> NatM InstrBlock
outOfLineCmmOp mop res args
= do
@@ -2370,6 +2479,13 @@ outOfLineCmmOp mop res args
MO_PopCnt _ -> fsLit "popcnt"
MO_BSwap _ -> fsLit "bswap"
+ MO_Clz w -> fsLit $ clzLabel w
+ MO_Ctz w -> fsLit $ ctzLabel w
+
+ MO_AtomicRMW _ _ -> fsLit "atomicrmw"
+ MO_AtomicRead _ -> fsLit "atomicread"
+ MO_AtomicWrite _ -> fsLit "atomicwrite"
+ MO_Cmpxchg _ -> fsLit "cmpxchg"
MO_UF_Conv _ -> unsupported
diff --git a/compiler/nativeGen/X86/Instr.hs b/compiler/nativeGen/X86/Instr.hs
index d10591e37f8e..a6e63f7d52f7 100644
--- a/compiler/nativeGen/X86/Instr.hs
+++ b/compiler/nativeGen/X86/Instr.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP, TypeFamilies #-}
+
-----------------------------------------------------------------------------
--
-- Machine-dependent assembly language
@@ -6,15 +8,15 @@
--
-----------------------------------------------------------------------------
-#include "HsVersions.h"
-#include "nativeGen/NCG.h"
-
module X86.Instr (Instr(..), Operand(..), PrefetchVariant(..), JumpDest,
getJumpDestBlockId, canShortcut, shortcutStatics,
shortcutJump, i386_insert_ffrees, allocMoreStack,
maxSpillSlots, archWordSize)
where
+#include "HsVersions.h"
+#include "nativeGen/NCG.h"
+
import X86.Cond
import X86.Regs
import Instruction
@@ -163,6 +165,9 @@ data Instr
-- comment pseudo-op
= COMMENT FastString
+ -- location pseudo-op (file, line, col, name)
+ | LOCATION Int Int Int String
+
-- some static data spat out during code
-- generation. Will be extracted before
-- pretty-printing.
@@ -180,6 +185,7 @@ data Instr
-- Moves.
| MOV Size Operand Operand
+ | CMOV Cond Size Operand Reg
| MOVZxL Size Operand Operand -- size is the size of operand 1
| MOVSxL Size Operand Operand -- size is the size of operand 1
-- x86_64 note: plain mov into a 32-bit register always zero-extends
@@ -202,6 +208,12 @@ data Instr
| DIV Size Operand -- eax := eax:edx/op, edx := eax:edx%op
| IDIV Size Operand -- ditto, but signed
+ -- Int Arithmetic, where the effects on the condition register
+ -- are important. Used in specialized sequences such as MO_Add2.
+ -- Do not rewrite these instructions to "equivalent" ones that
+ -- have different effect on the condition register! (See #9013.)
+ | ADD_CC Size Operand Operand
+
-- Simple bit-twiddling.
| AND Size Operand Operand
| OR Size Operand Operand
@@ -318,13 +330,20 @@ data Instr
-- call 1f
-- 1: popl %reg
- -- SSE4.2
- | POPCNT Size Operand Reg -- src, dst
+ -- bit counting instructions
+ | POPCNT Size Operand Reg -- [SSE4.2] count number of bits set to 1
+ | BSF Size Operand Reg -- bit scan forward
+ | BSR Size Operand Reg -- bit scan reverse
-- prefetch
| PREFETCH PrefetchVariant Size Operand -- prefetch Variant, addr size, address to prefetch
-- variant can be NTA, Lvl0, Lvl1, or Lvl2
+ | LOCK Instr -- lock prefix
+ | XADD Size Operand Operand -- src (r), dst (r/m)
+ | CMPXCHG Size Operand Operand -- src (r), dst (r/m), eax implicit
+ | MFENCE
+
data PrefetchVariant = NTA | Lvl0 | Lvl1 | Lvl2
@@ -335,10 +354,13 @@ data Operand
+-- | Returns which registers are read and written as a (read, written)
+-- pair.
x86_regUsageOfInstr :: Platform -> Instr -> RegUsage
x86_regUsageOfInstr platform instr
= case instr of
MOV _ src dst -> usageRW src dst
+ CMOV _ _ src dst -> mkRU (use_R src [dst]) [dst]
MOVZxL _ src dst -> usageRW src dst
MOVSxL _ src dst -> usageRW src dst
LEA _ src dst -> usageRW src dst
@@ -351,6 +373,7 @@ x86_regUsageOfInstr platform instr
MUL2 _ src -> mkRU (eax:use_R src []) [eax,edx]
DIV _ op -> mkRU (eax:edx:use_R op []) [eax,edx]
IDIV _ op -> mkRU (eax:edx:use_R op []) [eax,edx]
+ ADD_CC _ src dst -> usageRM src dst
AND _ src dst -> usageRM src dst
OR _ src dst -> usageRM src dst
@@ -420,16 +443,31 @@ x86_regUsageOfInstr platform instr
FETCHPC reg -> mkRU [] [reg]
COMMENT _ -> noUsage
+ LOCATION{} -> noUsage
DELTA _ -> noUsage
POPCNT _ src dst -> mkRU (use_R src []) [dst]
+ BSF _ src dst -> mkRU (use_R src []) [dst]
+ BSR _ src dst -> mkRU (use_R src []) [dst]
-- note: might be a better way to do this
PREFETCH _ _ src -> mkRU (use_R src []) []
+ LOCK i -> x86_regUsageOfInstr platform i
+ XADD _ src dst -> usageMM src dst
+ CMPXCHG _ src dst -> usageRMM src dst (OpReg eax)
+ MFENCE -> noUsage
_other -> panic "regUsage: unrecognised instr"
-
where
+ -- # Definitions
+ --
+ -- Written: If the operand is a register, it's written. If it's an
+ -- address, registers mentioned in the address are read.
+ --
+ -- Modified: If the operand is a register, it's both read and
+ -- written. If it's an address, registers mentioned in the address
+ -- are read.
+
-- 2 operand form; first operand Read; second Written
usageRW :: Operand -> Operand -> RegUsage
usageRW op (OpReg reg) = mkRU (use_R op []) [reg]
@@ -442,6 +480,18 @@ x86_regUsageOfInstr platform instr
usageRM op (OpAddr ea) = mkRUR (use_R op $! use_EA ea [])
usageRM _ _ = panic "X86.RegInfo.usageRM: no match"
+ -- 2 operand form; first operand Modified; second Modified
+ usageMM :: Operand -> Operand -> RegUsage
+ usageMM (OpReg src) (OpReg dst) = mkRU [src, dst] [src, dst]
+ usageMM (OpReg src) (OpAddr ea) = mkRU (use_EA ea [src]) [src]
+ usageMM _ _ = panic "X86.RegInfo.usageMM: no match"
+
+ -- 3 operand form; first operand Read; second Modified; third Modified
+ usageRMM :: Operand -> Operand -> Operand -> RegUsage
+ usageRMM (OpReg src) (OpReg dst) (OpReg reg) = mkRU [src, dst, reg] [dst, reg]
+ usageRMM (OpReg src) (OpAddr ea) (OpReg reg) = mkRU (use_EA ea [src, reg]) [reg]
+ usageRMM _ _ _ = panic "X86.RegInfo.usageRMM: no match"
+
-- 1 operand form; operand Modified
usageM :: Operand -> RegUsage
usageM (OpReg reg) = mkRU [reg] [reg]
@@ -474,6 +524,7 @@ x86_regUsageOfInstr platform instr
where src' = filter (interesting platform) src
dst' = filter (interesting platform) dst
+-- | Is this register interesting for the register allocator?
interesting :: Platform -> Reg -> Bool
interesting _ (RegVirtual _) = True
interesting platform (RegReal (RealRegSingle i)) = isFastTrue (freeReg platform i)
@@ -481,10 +532,13 @@ interesting _ (RegReal (RealRegPair{})) = panic "X86.interesting: no re
+-- | Applies the supplied function to all registers in instructions.
+-- Typically used to change virtual registers to real registers.
x86_patchRegsOfInstr :: Instr -> (Reg -> Reg) -> Instr
x86_patchRegsOfInstr instr env
= case instr of
MOV sz src dst -> patch2 (MOV sz) src dst
+ CMOV cc sz src dst -> CMOV cc sz (patchOp src) (env dst)
MOVZxL sz src dst -> patch2 (MOVZxL sz) src dst
MOVSxL sz src dst -> patch2 (MOVSxL sz) src dst
LEA sz src dst -> patch2 (LEA sz) src dst
@@ -497,6 +551,7 @@ x86_patchRegsOfInstr instr env
MUL2 sz src -> patch1 (MUL2 sz) src
IDIV sz op -> patch1 (IDIV sz) op
DIV sz op -> patch1 (DIV sz) op
+ ADD_CC sz src dst -> patch2 (ADD_CC sz) src dst
AND sz src dst -> patch2 (AND sz) src dst
OR sz src dst -> patch2 (OR sz) src dst
XOR sz src dst -> patch2 (XOR sz) src dst
@@ -559,6 +614,7 @@ x86_patchRegsOfInstr instr env
NOP -> instr
COMMENT _ -> instr
+ LOCATION {} -> instr
DELTA _ -> instr
JXX _ _ -> instr
@@ -566,9 +622,16 @@ x86_patchRegsOfInstr instr env
CLTD _ -> instr
POPCNT sz src dst -> POPCNT sz (patchOp src) (env dst)
+ BSF sz src dst -> BSF sz (patchOp src) (env dst)
+ BSR sz src dst -> BSR sz (patchOp src) (env dst)
PREFETCH lvl size src -> PREFETCH lvl size (patchOp src)
+ LOCK i -> LOCK (x86_patchRegsOfInstr i env)
+ XADD sz src dst -> patch2 (XADD sz) src dst
+ CMPXCHG sz src dst -> patch2 (CMPXCHG sz) src dst
+ MFENCE -> instr
+
_other -> panic "patchRegs: unrecognised instr"
where
@@ -712,6 +775,7 @@ x86_isMetaInstr
x86_isMetaInstr instr
= case instr of
COMMENT{} -> True
+ LOCATION{} -> True
LDATA{} -> True
NEWBLOCK{} -> True
DELTA{} -> True
@@ -788,7 +852,7 @@ i386_insert_ffrees
-> [GenBasicBlock Instr]
i386_insert_ffrees blocks
- | or (map (any is_G_instr) [ instrs | BasicBlock _ instrs <- blocks ])
+ | any (any is_G_instr) [ instrs | BasicBlock _ instrs <- blocks ]
= map insertGFREEs blocks
| otherwise
= blocks
diff --git a/compiler/nativeGen/X86/Ppr.hs b/compiler/nativeGen/X86/Ppr.hs
index f38a04d06935..b35a1b3d2766 100644
--- a/compiler/nativeGen/X86/Ppr.hs
+++ b/compiler/nativeGen/X86/Ppr.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Pretty-printing assembly language
@@ -63,6 +65,7 @@ pprNatCmmDecl proc@(CmmProc top_info lbl _ (ListGraph blocks)) =
pprSectionHeader Text $$
pprLabel lbl $$ -- blocks guaranteed not null, so label needed
vcat (map (pprBasicBlock top_info) blocks) $$
+ ppr (mkAsmTempEndLabel lbl) <> char ':' $$
pprSizeDecl lbl
Just (Statics info_lbl _) ->
@@ -87,6 +90,7 @@ pprNatCmmDecl proc@(CmmProc top_info lbl _ (ListGraph blocks)) =
<+> char '-'
<+> ppr (mkDeadStripPreventer info_lbl)
else empty) $$
+ ppr (mkAsmTempEndLabel info_lbl) <> char ':' $$
pprSizeDecl info_lbl
-- | Output the ELF .size directive.
@@ -101,15 +105,23 @@ pprSizeDecl lbl
pprBasicBlock :: BlockEnv CmmStatics -> NatBasicBlock Instr -> SDoc
pprBasicBlock info_env (BasicBlock blockid instrs)
= maybe_infotable $$
- pprLabel (mkAsmTempLabel (getUnique blockid)) $$
- vcat (map pprInstr instrs)
+ pprLabel asmLbl $$
+ vcat (map pprInstr instrs) $$
+ ppr (mkAsmTempEndLabel asmLbl) <> char ':'
where
+ asmLbl = mkAsmTempLabel (getUnique blockid)
maybe_infotable = case mapLookup blockid info_env of
Nothing -> empty
Just (Statics info_lbl info) ->
pprSectionHeader Text $$
+ infoTableLoc $$
vcat (map pprData info) $$
pprLabel info_lbl
+ -- Make sure the info table has the right .loc for the block
+ -- coming right after it. See [Note: Info Offset]
+ infoTableLoc = case instrs of
+ (l@LOCATION{} : _) -> pprInstr l
+ _other -> empty
pprDatas :: (Alignment, CmmStatics) -> SDoc
pprDatas (align, (Statics lbl dats))
@@ -489,6 +501,11 @@ pprInstr (COMMENT _) = empty -- nuke 'em
{-
pprInstr (COMMENT s) = ptext (sLit "# ") <> ftext s
-}
+
+pprInstr (LOCATION file line col name)
+ = ptext (sLit "\t.loc ") <> ppr file <+> ppr line <+> ppr col <>
+ ptext (sLit " /* ") <> text name <> ptext (sLit " */")
+
pprInstr (DELTA d)
= pprInstr (COMMENT (mkFastString ("\tdelta = " ++ show d)))
@@ -519,6 +536,9 @@ pprInstr (RELOAD slot reg)
pprInstr (MOV size src dst)
= pprSizeOpOp (sLit "mov") size src dst
+pprInstr (CMOV cc size src dst)
+ = pprCondOpReg (sLit "cmov") size cc src dst
+
pprInstr (MOVZxL II32 src dst) = pprSizeOpOp (sLit "mov") II32 src dst
-- 32-to-64 bit zero extension on x86_64 is accomplished by a simple
-- movl. But we represent it as a MOVZxL instruction, because
@@ -561,6 +581,9 @@ pprInstr (ADC size src dst)
pprInstr (SUB size src dst) = pprSizeOpOp (sLit "sub") size src dst
pprInstr (IMUL size op1 op2) = pprSizeOpOp (sLit "imul") size op1 op2
+pprInstr (ADD_CC size src dst)
+ = pprSizeOpOp (sLit "add") size src dst
+
{- A hack. The Intel documentation says that "The two and three
operand forms [of IMUL] may also be used with unsigned operands
because the lower half of the product is the same regardless if
@@ -576,6 +599,8 @@ pprInstr (XOR FF64 src dst) = pprOpOp (sLit "xorpd") FF64 src dst
pprInstr (XOR size src dst) = pprSizeOpOp (sLit "xor") size src dst
pprInstr (POPCNT size src dst) = pprOpOp (sLit "popcnt") size src (OpReg dst)
+pprInstr (BSF size src dst) = pprOpOp (sLit "bsf") size src (OpReg dst)
+pprInstr (BSR size src dst) = pprOpOp (sLit "bsr") size src (OpReg dst)
pprInstr (PREFETCH NTA size src ) = pprSizeOp_ (sLit "prefetchnta") size src
pprInstr (PREFETCH Lvl0 size src) = pprSizeOp_ (sLit "prefetcht0") size src
@@ -884,6 +909,16 @@ pprInstr GFREE
ptext (sLit "\tffree %st(4) ;ffree %st(5)")
]
+-- Atomics
+
+pprInstr (LOCK i) = ptext (sLit "\tlock") $$ pprInstr i
+
+pprInstr MFENCE = ptext (sLit "\tmfence")
+
+pprInstr (XADD size src dst) = pprSizeOpOp (sLit "xadd") size src dst
+
+pprInstr (CMPXCHG size src dst) = pprSizeOpOp (sLit "cmpxchg") size src dst
+
pprInstr _
= panic "X86.Ppr.pprInstr: no match"
@@ -1104,6 +1139,18 @@ pprSizeOpReg name size op1 reg2
pprReg (archWordSize (target32Bit platform)) reg2
]
+pprCondOpReg :: LitString -> Size -> Cond -> Operand -> Reg -> SDoc
+pprCondOpReg name size cond op1 reg2
+ = hcat [
+ char '\t',
+ ptext name,
+ pprCond cond,
+ space,
+ pprOperand size op1,
+ comma,
+ pprReg size reg2
+ ]
+
pprCondRegReg :: LitString -> Size -> Cond -> Reg -> Reg -> SDoc
pprCondRegReg name size cond reg1 reg2
= hcat [
diff --git a/compiler/nativeGen/X86/RegInfo.hs b/compiler/nativeGen/X86/RegInfo.hs
index 8c63933c5bc2..39535634d734 100644
--- a/compiler/nativeGen/X86/RegInfo.hs
+++ b/compiler/nativeGen/X86/RegInfo.hs
@@ -1,14 +1,7 @@
-
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
+{-# LANGUAGE CPP #-}
module X86.RegInfo (
- mkVirtualReg,
- regDotColor
+ mkVirtualReg,
+ regDotColor
)
where
@@ -30,9 +23,9 @@ import X86.Regs
mkVirtualReg :: Unique -> Size -> VirtualReg
mkVirtualReg u size
= case size of
- FF32 -> VirtualRegSSE u
- FF64 -> VirtualRegSSE u
- FF80 -> VirtualRegD u
+ FF32 -> VirtualRegSSE u
+ FF64 -> VirtualRegSSE u
+ FF80 -> VirtualRegD u
_other -> VirtualRegI u
regDotColor :: Platform -> RealReg -> SDoc
@@ -65,11 +58,10 @@ normalRegColors platform
fpRegColors :: [(Reg,String)]
fpRegColors =
[ (fake0, "#ff00ff")
- , (fake1, "#ff00aa")
- , (fake2, "#aa00ff")
- , (fake3, "#aa00aa")
- , (fake4, "#ff0055")
- , (fake5, "#5500ff") ]
-
- ++ zip (map regSingle [24..39]) (repeat "red")
+ , (fake1, "#ff00aa")
+ , (fake2, "#aa00ff")
+ , (fake3, "#aa00aa")
+ , (fake4, "#ff0055")
+ , (fake5, "#5500ff") ]
+ ++ zip (map regSingle [24..39]) (repeat "red")
diff --git a/compiler/nativeGen/X86/Regs.hs b/compiler/nativeGen/X86/Regs.hs
index b5139c92bf11..4162e2b703f3 100644
--- a/compiler/nativeGen/X86/Regs.hs
+++ b/compiler/nativeGen/X86/Regs.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
module X86.Regs (
-- squeese functions for the graph allocator
virtualRegSqueeze,
@@ -403,6 +405,9 @@ callClobberedRegs :: Platform -> [Reg]
-- caller-saves registers
callClobberedRegs platform
| target32Bit platform = [eax,ecx,edx] ++ map regSingle (floatregnos platform)
+ | platformOS platform == OSMinGW32
+ = [rax,rcx,rdx,r8,r9,r10,r11]
+ ++ map regSingle (floatregnos platform)
| otherwise
-- all xmm regs are caller-saves
-- caller-saves registers
diff --git a/compiler/parser/Ctype.lhs b/compiler/parser/Ctype.lhs
index b5173b2612af..7233f50e7f3c 100644
--- a/compiler/parser/Ctype.lhs
+++ b/compiler/parser/Ctype.lhs
@@ -1,32 +1,26 @@
Character classification
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
+{-# LANGUAGE CPP #-}
module Ctype
- ( is_ident -- Char# -> Bool
- , is_symbol -- Char# -> Bool
- , is_any -- Char# -> Bool
- , is_space -- Char# -> Bool
- , is_lower -- Char# -> Bool
- , is_upper -- Char# -> Bool
- , is_digit -- Char# -> Bool
- , is_alphanum -- Char# -> Bool
-
- , is_decdigit, is_hexdigit, is_octdigit
- , hexDigit, octDecDigit
- ) where
+ ( is_ident -- Char# -> Bool
+ , is_symbol -- Char# -> Bool
+ , is_any -- Char# -> Bool
+ , is_space -- Char# -> Bool
+ , is_lower -- Char# -> Bool
+ , is_upper -- Char# -> Bool
+ , is_digit -- Char# -> Bool
+ , is_alphanum -- Char# -> Bool
+
+ , is_decdigit, is_hexdigit, is_octdigit, is_bindigit
+ , hexDigit, octDecDigit
+ ) where
#include "HsVersions.h"
-import Data.Int ( Int32 )
-import Data.Bits ( Bits((.&.)) )
-import Data.Char ( ord, chr )
+import Data.Int ( Int32 )
+import Data.Bits ( Bits((.&.)) )
+import Data.Char ( ord, chr )
import Panic
\end{code}
@@ -75,17 +69,20 @@ octDecDigit c = ord c - ord '0'
is_decdigit :: Char -> Bool
is_decdigit c
- = c >= '0' && c <= '9'
+ = c >= '0' && c <= '9'
is_hexdigit :: Char -> Bool
is_hexdigit c
- = is_decdigit c
- || (c >= 'a' && c <= 'f')
- || (c >= 'A' && c <= 'F')
+ = is_decdigit c
+ || (c >= 'a' && c <= 'f')
+ || (c >= 'A' && c <= 'F')
is_octdigit :: Char -> Bool
is_octdigit c = c >= '0' && c <= '7'
+is_bindigit :: Char -> Bool
+is_bindigit c = c == '0' || c == '1'
+
to_lower :: Char -> Char
to_lower c
| c >= 'A' && c <= 'Z' = chr (ord c - (ord 'A' - ord 'a'))
@@ -108,7 +105,7 @@ charType c = case c of
'\7' -> 0 -- \007
'\8' -> 0 -- \010
'\9' -> cSpace -- \t (not allowed in strings, so !cAny)
- '\10' -> cSpace -- \n (ditto)
+ '\10' -> cSpace -- \n (ditto)
'\11' -> cSpace -- \v (ditto)
'\12' -> cSpace -- \f (ditto)
'\13' -> cSpace -- ^M (ditto)
diff --git a/compiler/parser/LexCore.hs b/compiler/parser/LexCore.hs
deleted file mode 100644
index 861fffb7f676..000000000000
--- a/compiler/parser/LexCore.hs
+++ /dev/null
@@ -1,115 +0,0 @@
-module LexCore where
-
-import ParserCoreUtils
-import Panic
-import Data.Char
-import Numeric
-
-isNameChar :: Char -> Bool
-isNameChar c = isAlpha c || isDigit c || (c == '_') || (c == '\'')
- || (c == '$') || (c == '-') || (c == '.')
-
-isKeywordChar :: Char -> Bool
-isKeywordChar c = isAlpha c || (c == '_')
-
-lexer :: (Token -> P a) -> P a
-lexer cont [] = cont TKEOF []
-lexer cont ('\n':cs) = \line -> lexer cont cs (line+1)
-lexer cont ('-':'>':cs) = cont TKrarrow cs
-
-lexer cont (c:cs)
- | isSpace c = lexer cont cs
- | isLower c || (c == '_') = lexName cont TKname (c:cs)
- | isUpper c = lexName cont TKcname (c:cs)
- | isDigit c || (c == '-') = lexNum cont (c:cs)
-
-lexer cont ('%':cs) = lexKeyword cont cs
-lexer cont ('\'':cs) = lexChar cont cs
-lexer cont ('\"':cs) = lexString [] cont cs
-lexer cont ('#':cs) = cont TKhash cs
-lexer cont ('(':cs) = cont TKoparen cs
-lexer cont (')':cs) = cont TKcparen cs
-lexer cont ('{':cs) = cont TKobrace cs
-lexer cont ('}':cs) = cont TKcbrace cs
-lexer cont ('=':cs) = cont TKeq cs
-lexer cont (':':'=':':':cs) = cont TKcoloneqcolon cs
-lexer cont (':':':':cs) = cont TKcoloncolon cs
-lexer cont ('*':cs) = cont TKstar cs
-lexer cont ('.':cs) = cont TKdot cs
-lexer cont ('\\':cs) = cont TKlambda cs
-lexer cont ('@':cs) = cont TKat cs
-lexer cont ('?':cs) = cont TKquestion cs
-lexer cont (';':cs) = cont TKsemicolon cs
--- 20060420 GHC spits out constructors with colon in them nowadays. jds
--- 20061103 but it's easier to parse if we split on the colon, and treat them
--- as several tokens
-lexer cont (':':cs) = cont TKcolon cs
--- 20060420 Likewise does it create identifiers starting with dollar. jds
-lexer cont ('$':cs) = lexName cont TKname ('$':cs)
-lexer _ (c:_) = failP "invalid character" [c]
-
-lexChar :: (Token -> String -> Int -> ParseResult a) -> String -> Int
- -> ParseResult a
-lexChar cont ('\\':'x':h1:h0:'\'':cs)
- | isHexEscape [h1,h0] = cont (TKchar (hexToChar h1 h0)) cs
-lexChar _ ('\\':cs) = failP "invalid char character" ('\\':(take 10 cs))
-lexChar _ ('\'':_) = failP "invalid char character" ['\'']
-lexChar _ ('\"':_) = failP "invalid char character" ['\"']
-lexChar cont (c:'\'':cs) = cont (TKchar c) cs
-lexChar _ cs = panic ("lexChar: " ++ show cs)
-
-lexString :: String -> (Token -> [Char] -> Int -> ParseResult a)
- -> String -> Int -> ParseResult a
-lexString s cont ('\\':'x':h1:h0:cs)
- | isHexEscape [h1,h0] = lexString (s++[hexToChar h1 h0]) cont cs
-lexString _ _ ('\\':_) = failP "invalid string character" ['\\']
-lexString _ _ ('\'':_) = failP "invalid string character" ['\'']
-lexString s cont ('\"':cs) = cont (TKstring s) cs
-lexString s cont (c:cs) = lexString (s++[c]) cont cs
-lexString _ _ [] = panic "lexString []"
-
-isHexEscape :: String -> Bool
-isHexEscape = all (\c -> isHexDigit c && (isDigit c || isLower c))
-
-hexToChar :: Char -> Char -> Char
-hexToChar h1 h0 = chr (digitToInt h1 * 16 + digitToInt h0)
-
-lexNum :: (Token -> String -> a) -> String -> a
-lexNum cont cs =
- case cs of
- ('-':cs) -> f (-1) cs
- _ -> f 1 cs
- where f sgn cs =
- case span isDigit cs of
- (digits,'.':c:rest)
- | isDigit c -> cont (TKrational (fromInteger sgn * r)) rest'
- where ((r,rest'):_) = readFloat (digits ++ ('.':c:rest))
- -- When reading a floating-point number, which is
- -- a bit complicated, use the standard library function
- -- "readFloat"
- (digits,rest) -> cont (TKinteger (sgn * (read digits))) rest
-
-lexName :: (a -> String -> b) -> (String -> a) -> String -> b
-lexName cont cstr cs = cont (cstr name) rest
- where (name,rest) = span isNameChar cs
-
-lexKeyword :: (Token -> [Char] -> Int -> ParseResult a) -> String -> Int
- -> ParseResult a
-lexKeyword cont cs =
- case span isKeywordChar cs of
- ("module",rest) -> cont TKmodule rest
- ("data",rest) -> cont TKdata rest
- ("newtype",rest) -> cont TKnewtype rest
- ("forall",rest) -> cont TKforall rest
- ("rec",rest) -> cont TKrec rest
- ("let",rest) -> cont TKlet rest
- ("in",rest) -> cont TKin rest
- ("case",rest) -> cont TKcase rest
- ("of",rest) -> cont TKof rest
- ("cast",rest) -> cont TKcast rest
- ("note",rest) -> cont TKnote rest
- ("external",rest) -> cont TKexternal rest
- ("local",rest) -> cont TKlocal rest
- ("_",rest) -> cont TKwild rest
- _ -> failP "invalid keyword" ('%':cs)
-
diff --git a/compiler/parser/Lexer.x b/compiler/parser/Lexer.x
index 3d02393d170c..88a0f07d9041 100644
--- a/compiler/parser/Lexer.x
+++ b/compiler/parser/Lexer.x
@@ -117,6 +117,7 @@ $small = [$ascsmall $unismall \_]
$unigraphic = \x06 -- Trick Alex into handling Unicode. See alexGetChar.
$graphic = [$small $large $symbol $digit $special $unigraphic \:\"\']
+$binit = 0-1
$octit = 0-7
$hexit = [$decdigit A-F a-f]
$symchar = [$symbol \:]
@@ -134,6 +135,7 @@ $docsym = [\| \^ \* \$]
@consym = \: $symchar*
@decimal = $decdigit+
+@binary = $binit+
@octal = $octit+
@hexadecimal = $hexit+
@exponent = [eE] [\-\+]? @decimal
@@ -401,9 +403,12 @@ $tab+ { warn Opt_WarnTabs (text "Tab character") }
<0> {
-- Normal integral literals (:: Num a => a, from Integer)
@decimal { tok_num positive 0 0 decimal }
+ 0[bB] @binary / { ifExtension binaryLiteralsEnabled } { tok_num positive 2 2 binary }
0[oO] @octal { tok_num positive 2 2 octal }
0[xX] @hexadecimal { tok_num positive 2 2 hexadecimal }
@negative @decimal / { ifExtension negativeLiteralsEnabled } { tok_num negative 1 1 decimal }
+ @negative 0[bB] @binary / { ifExtension negativeLiteralsEnabled `alexAndPred`
+ ifExtension binaryLiteralsEnabled } { tok_num negative 3 3 binary }
@negative 0[oO] @octal / { ifExtension negativeLiteralsEnabled } { tok_num negative 3 3 octal }
@negative 0[xX] @hexadecimal / { ifExtension negativeLiteralsEnabled } { tok_num negative 3 3 hexadecimal }
@@ -417,13 +422,19 @@ $tab+ { warn Opt_WarnTabs (text "Tab character") }
-- It's simpler (and faster?) to give separate cases to the negatives,
-- especially considering octal/hexadecimal prefixes.
@decimal \# / { ifExtension magicHashEnabled } { tok_primint positive 0 1 decimal }
+ 0[bB] @binary \# / { ifExtension magicHashEnabled `alexAndPred`
+ ifExtension binaryLiteralsEnabled } { tok_primint positive 2 3 binary }
0[oO] @octal \# / { ifExtension magicHashEnabled } { tok_primint positive 2 3 octal }
0[xX] @hexadecimal \# / { ifExtension magicHashEnabled } { tok_primint positive 2 3 hexadecimal }
@negative @decimal \# / { ifExtension magicHashEnabled } { tok_primint negative 1 2 decimal }
+ @negative 0[bB] @binary \# / { ifExtension magicHashEnabled `alexAndPred`
+ ifExtension binaryLiteralsEnabled } { tok_primint negative 3 4 binary }
@negative 0[oO] @octal \# / { ifExtension magicHashEnabled } { tok_primint negative 3 4 octal }
@negative 0[xX] @hexadecimal \# / { ifExtension magicHashEnabled } { tok_primint negative 3 4 hexadecimal }
@decimal \# \# / { ifExtension magicHashEnabled } { tok_primword 0 2 decimal }
+ 0[bB] @binary \# \# / { ifExtension magicHashEnabled `alexAndPred`
+ ifExtension binaryLiteralsEnabled } { tok_primword 2 4 binary }
0[oO] @octal \# \# / { ifExtension magicHashEnabled } { tok_primword 2 4 octal }
0[xX] @hexadecimal \# \# / { ifExtension magicHashEnabled } { tok_primword 2 4 hexadecimal }
@@ -516,6 +527,10 @@ data Token
| ITvect_scalar_prag
| ITnovect_prag
| ITminimal_prag
+ | IToverlappable_prag -- instance overlap mode
+ | IToverlapping_prag -- instance overlap mode
+ | IToverlaps_prag -- instance overlap mode
+ | ITincoherent_prag -- instance overlap mode
| ITctype
| ITdotdot -- reserved symbols
@@ -635,7 +650,7 @@ data Token
-- facilitates using a keyword in two different extensions that can be
-- activated independently)
--
-reservedWordsFM :: UniqFM (Token, Int)
+reservedWordsFM :: UniqFM (Token, ExtsBitmap)
reservedWordsFM = listToUFM $
map (\(x, y, z) -> (mkFastString x, (y, z)))
[( "_", ITunderscore, 0 ),
@@ -664,34 +679,34 @@ reservedWordsFM = listToUFM $
( "type", ITtype, 0 ),
( "where", ITwhere, 0 ),
- ( "forall", ITforall, bit explicitForallBit .|.
- bit inRulePragBit),
- ( "mdo", ITmdo, bit recursiveDoBit),
+ ( "forall", ITforall, xbit ExplicitForallBit .|.
+ xbit InRulePragBit),
+ ( "mdo", ITmdo, xbit RecursiveDoBit),
-- See Note [Lexing type pseudo-keywords]
( "family", ITfamily, 0 ),
( "role", ITrole, 0 ),
- ( "pattern", ITpattern, bit patternSynonymsBit),
- ( "group", ITgroup, bit transformComprehensionsBit),
- ( "by", ITby, bit transformComprehensionsBit),
- ( "using", ITusing, bit transformComprehensionsBit),
-
- ( "foreign", ITforeign, bit ffiBit),
- ( "export", ITexport, bit ffiBit),
- ( "label", ITlabel, bit ffiBit),
- ( "dynamic", ITdynamic, bit ffiBit),
- ( "safe", ITsafe, bit ffiBit .|.
- bit safeHaskellBit),
- ( "interruptible", ITinterruptible, bit interruptibleFfiBit),
- ( "unsafe", ITunsafe, bit ffiBit),
- ( "stdcall", ITstdcallconv, bit ffiBit),
- ( "ccall", ITccallconv, bit ffiBit),
- ( "capi", ITcapiconv, bit cApiFfiBit),
- ( "prim", ITprimcallconv, bit ffiBit),
- ( "javascript", ITjavascriptcallconv, bit ffiBit),
-
- ( "rec", ITrec, bit arrowsBit .|.
- bit recursiveDoBit),
- ( "proc", ITproc, bit arrowsBit)
+ ( "pattern", ITpattern, xbit PatternSynonymsBit),
+ ( "group", ITgroup, xbit TransformComprehensionsBit),
+ ( "by", ITby, xbit TransformComprehensionsBit),
+ ( "using", ITusing, xbit TransformComprehensionsBit),
+
+ ( "foreign", ITforeign, xbit FfiBit),
+ ( "export", ITexport, xbit FfiBit),
+ ( "label", ITlabel, xbit FfiBit),
+ ( "dynamic", ITdynamic, xbit FfiBit),
+ ( "safe", ITsafe, xbit FfiBit .|.
+ xbit SafeHaskellBit),
+ ( "interruptible", ITinterruptible, xbit InterruptibleFfiBit),
+ ( "unsafe", ITunsafe, xbit FfiBit),
+ ( "stdcall", ITstdcallconv, xbit FfiBit),
+ ( "ccall", ITccallconv, xbit FfiBit),
+ ( "capi", ITcapiconv, xbit CApiFfiBit),
+ ( "prim", ITprimcallconv, xbit FfiBit),
+ ( "javascript", ITjavascriptcallconv, xbit FfiBit),
+
+ ( "rec", ITrec, xbit ArrowsBit .|.
+ xbit RecursiveDoBit),
+ ( "proc", ITproc, xbit ArrowsBit)
]
{-----------------------------------
@@ -711,7 +726,7 @@ Also, note that these are included in the `varid` production in the parser --
a key detail to make all this work.
-------------------------------------}
-reservedSymsFM :: UniqFM (Token, Int -> Bool)
+reservedSymsFM :: UniqFM (Token, ExtsBitmap -> Bool)
reservedSymsFM = listToUFM $
map (\ (x,y,z) -> (mkFastString x,(y,z)))
[ ("..", ITdotdot, always)
@@ -822,11 +837,11 @@ nextCharIs buf p = not (atEnd buf) && p (currentChar buf)
nextCharIsNot :: StringBuffer -> (Char -> Bool) -> Bool
nextCharIsNot buf p = not (nextCharIs buf p)
-notFollowedBy :: Char -> AlexAccPred Int
+notFollowedBy :: Char -> AlexAccPred ExtsBitmap
notFollowedBy char _ _ _ (AI _ buf)
= nextCharIsNot buf (== char)
-notFollowedBySymbol :: AlexAccPred Int
+notFollowedBySymbol :: AlexAccPred ExtsBitmap
notFollowedBySymbol _ _ _ (AI _ buf)
= nextCharIsNot buf (`elem` "!#$%&*+./<=>?@\\^|-~")
@@ -835,7 +850,7 @@ notFollowedBySymbol _ _ _ (AI _ buf)
-- maximal munch, but not always, because the nested comment rule is
-- valid in all states, but the doc-comment rules are only valid in
-- the non-layout states.
-isNormalComment :: AlexAccPred Int
+isNormalComment :: AlexAccPred ExtsBitmap
isNormalComment bits _ _ (AI _ buf)
| haddockEnabled bits = notFollowedByDocOrPragma
| otherwise = nextCharIsNot buf (== '#')
@@ -849,10 +864,10 @@ afterOptionalSpace buf p
then p (snd (nextChar buf))
else p buf
-atEOL :: AlexAccPred Int
+atEOL :: AlexAccPred ExtsBitmap
atEOL _ _ _ (AI _ buf) = atEnd buf || currentChar buf == '\n'
-ifExtension :: (Int -> Bool) -> AlexAccPred Int
+ifExtension :: (ExtsBitmap -> Bool) -> AlexAccPred ExtsBitmap
ifExtension pred bits _ _ _ = pred bits
multiline_doc_comment :: Action
@@ -954,12 +969,12 @@ withLexedDocType lexDocComment = do
-- off again at the end of the pragma.
rulePrag :: Action
rulePrag span _buf _len = do
- setExts (.|. bit inRulePragBit)
+ setExts (.|. xbit InRulePragBit)
return (L span ITrules_prag)
endPrag :: Action
endPrag span _buf _len = do
- setExts (.&. complement (bit inRulePragBit))
+ setExts (.&. complement (xbit InRulePragBit))
return (L span ITclose_prag)
-- docCommentEnd
@@ -1112,6 +1127,7 @@ positive = id
negative = negate
decimal, octal, hexadecimal :: (Integer, Char -> Int)
decimal = (10,octDecDigit)
+binary = (2,octDecDigit)
octal = (8,octDecDigit)
hexadecimal = (16,hexDigit)
@@ -1410,6 +1426,7 @@ lex_escape = do
'x' -> readNum is_hexdigit 16 hexDigit
'o' -> readNum is_octdigit 8 octDecDigit
+ 'b' -> readNum is_bindigit 2 octDecDigit
x | is_decdigit x -> readNum2 is_decdigit 10 octDecDigit (octDecDigit x)
c1 -> do
@@ -1592,7 +1609,7 @@ data PState = PState {
last_loc :: RealSrcSpan, -- pos of previous token
last_len :: !Int, -- len of previous token
loc :: RealSrcLoc, -- current loc (end of prev token + 1)
- extsBitmap :: !Int, -- bitmap that determines permitted
+ extsBitmap :: !ExtsBitmap, -- bitmap that determines permitted
-- extensions
context :: [LayoutContext],
lex_state :: [Int],
@@ -1664,18 +1681,18 @@ getPState = P $ \s -> POk s s
instance HasDynFlags P where
getDynFlags = P $ \s -> POk s (dflags s)
-withThisPackage :: (PackageId -> a) -> P a
+withThisPackage :: (PackageKey -> a) -> P a
withThisPackage f
= do pkg <- liftM thisPackage getDynFlags
return $ f pkg
-extension :: (Int -> Bool) -> P Bool
+extension :: (ExtsBitmap -> Bool) -> P Bool
extension p = P $ \s -> POk s (p $! extsBitmap s)
-getExts :: P Int
+getExts :: P ExtsBitmap
getExts = P $ \s -> POk s (extsBitmap s)
-setExts :: (Int -> Int) -> P ()
+setExts :: (ExtsBitmap -> ExtsBitmap) -> P ()
setExts f = P $ \s -> POk s{ extsBitmap = f (extsBitmap s) } ()
setSrcLoc :: RealSrcLoc -> P ()
@@ -1855,130 +1872,110 @@ setAlrExpectingOCurly b = P $ \s -> POk (s {alr_expecting_ocurly = b}) ()
-- for reasons of efficiency, flags indicating language extensions (eg,
-- -fglasgow-exts or -XParallelArrays) are represented by a bitmap
--- stored in an unboxed Int
-
-ffiBit :: Int
-ffiBit= 0
-interruptibleFfiBit :: Int
-interruptibleFfiBit = 1
-cApiFfiBit :: Int
-cApiFfiBit = 2
-parrBit :: Int
-parrBit = 3
-arrowsBit :: Int
-arrowsBit = 4
-thBit :: Int
-thBit = 5
-ipBit :: Int
-ipBit = 6
-explicitForallBit :: Int
-explicitForallBit = 7 -- the 'forall' keyword and '.' symbol
-bangPatBit :: Int
-bangPatBit = 8 -- Tells the parser to understand bang-patterns
- -- (doesn't affect the lexer)
-patternSynonymsBit :: Int
-patternSynonymsBit = 9 -- pattern synonyms
-haddockBit :: Int
-haddockBit = 10 -- Lex and parse Haddock comments
-magicHashBit :: Int
-magicHashBit = 11 -- "#" in both functions and operators
-kindSigsBit :: Int
-kindSigsBit = 12 -- Kind signatures on type variables
-recursiveDoBit :: Int
-recursiveDoBit = 13 -- mdo
-unicodeSyntaxBit :: Int
-unicodeSyntaxBit = 14 -- the forall symbol, arrow symbols, etc
-unboxedTuplesBit :: Int
-unboxedTuplesBit = 15 -- (# and #)
-datatypeContextsBit :: Int
-datatypeContextsBit = 16
-transformComprehensionsBit :: Int
-transformComprehensionsBit = 17
-qqBit :: Int
-qqBit = 18 -- enable quasiquoting
-inRulePragBit :: Int
-inRulePragBit = 19
-rawTokenStreamBit :: Int
-rawTokenStreamBit = 20 -- producing a token stream with all comments included
-sccProfilingOnBit :: Int
-sccProfilingOnBit = 21
-hpcBit :: Int
-hpcBit = 22
-alternativeLayoutRuleBit :: Int
-alternativeLayoutRuleBit = 23
-relaxedLayoutBit :: Int
-relaxedLayoutBit = 24
-nondecreasingIndentationBit :: Int
-nondecreasingIndentationBit = 25
-safeHaskellBit :: Int
-safeHaskellBit = 26
-traditionalRecordSyntaxBit :: Int
-traditionalRecordSyntaxBit = 27
-typeLiteralsBit :: Int
-typeLiteralsBit = 28
-explicitNamespacesBit :: Int
-explicitNamespacesBit = 29
-lambdaCaseBit :: Int
-lambdaCaseBit = 30
-negativeLiteralsBit :: Int
-negativeLiteralsBit = 31
-
-
-always :: Int -> Bool
+-- stored in an unboxed Word64
+type ExtsBitmap = Word64
+
+xbit :: ExtBits -> ExtsBitmap
+xbit = bit . fromEnum
+
+xtest :: ExtBits -> ExtsBitmap -> Bool
+xtest ext xmap = testBit xmap (fromEnum ext)
+
+data ExtBits
+ = FfiBit
+ | InterruptibleFfiBit
+ | CApiFfiBit
+ | ParrBit
+ | ArrowsBit
+ | ThBit
+ | IpBit
+ | ExplicitForallBit -- the 'forall' keyword and '.' symbol
+ | BangPatBit -- Tells the parser to understand bang-patterns
+ -- (doesn't affect the lexer)
+ | PatternSynonymsBit -- pattern synonyms
+ | HaddockBit -- Lex and parse Haddock comments
+ | MagicHashBit -- "#" in both functions and operators
+ | KindSigsBit -- Kind signatures on type variables
+ | RecursiveDoBit -- mdo
+ | UnicodeSyntaxBit -- the forall symbol, arrow symbols, etc
+ | UnboxedTuplesBit -- (# and #)
+ | DatatypeContextsBit
+ | TransformComprehensionsBit
+ | QqBit -- enable quasiquoting
+ | InRulePragBit
+ | RawTokenStreamBit -- producing a token stream with all comments included
+ | SccProfilingOnBit
+ | HpcBit
+ | AlternativeLayoutRuleBit
+ | RelaxedLayoutBit
+ | NondecreasingIndentationBit
+ | SafeHaskellBit
+ | TraditionalRecordSyntaxBit
+ | TypeLiteralsBit
+ | ExplicitNamespacesBit
+ | LambdaCaseBit
+ | BinaryLiteralsBit
+ | NegativeLiteralsBit
+ deriving Enum
+
+
+always :: ExtsBitmap -> Bool
always _ = True
-parrEnabled :: Int -> Bool
-parrEnabled flags = testBit flags parrBit
-arrowsEnabled :: Int -> Bool
-arrowsEnabled flags = testBit flags arrowsBit
-thEnabled :: Int -> Bool
-thEnabled flags = testBit flags thBit
-ipEnabled :: Int -> Bool
-ipEnabled flags = testBit flags ipBit
-explicitForallEnabled :: Int -> Bool
-explicitForallEnabled flags = testBit flags explicitForallBit
-bangPatEnabled :: Int -> Bool
-bangPatEnabled flags = testBit flags bangPatBit
-haddockEnabled :: Int -> Bool
-haddockEnabled flags = testBit flags haddockBit
-magicHashEnabled :: Int -> Bool
-magicHashEnabled flags = testBit flags magicHashBit
--- kindSigsEnabled :: Int -> Bool
--- kindSigsEnabled flags = testBit flags kindSigsBit
-unicodeSyntaxEnabled :: Int -> Bool
-unicodeSyntaxEnabled flags = testBit flags unicodeSyntaxBit
-unboxedTuplesEnabled :: Int -> Bool
-unboxedTuplesEnabled flags = testBit flags unboxedTuplesBit
-datatypeContextsEnabled :: Int -> Bool
-datatypeContextsEnabled flags = testBit flags datatypeContextsBit
-qqEnabled :: Int -> Bool
-qqEnabled flags = testBit flags qqBit
-inRulePrag :: Int -> Bool
-inRulePrag flags = testBit flags inRulePragBit
-rawTokenStreamEnabled :: Int -> Bool
-rawTokenStreamEnabled flags = testBit flags rawTokenStreamBit
-alternativeLayoutRule :: Int -> Bool
-alternativeLayoutRule flags = testBit flags alternativeLayoutRuleBit
-hpcEnabled :: Int -> Bool
-hpcEnabled flags = testBit flags hpcBit
-relaxedLayout :: Int -> Bool
-relaxedLayout flags = testBit flags relaxedLayoutBit
-nondecreasingIndentation :: Int -> Bool
-nondecreasingIndentation flags = testBit flags nondecreasingIndentationBit
-sccProfilingOn :: Int -> Bool
-sccProfilingOn flags = testBit flags sccProfilingOnBit
-traditionalRecordSyntaxEnabled :: Int -> Bool
-traditionalRecordSyntaxEnabled flags = testBit flags traditionalRecordSyntaxBit
-typeLiteralsEnabled :: Int -> Bool
-typeLiteralsEnabled flags = testBit flags typeLiteralsBit
-
-explicitNamespacesEnabled :: Int -> Bool
-explicitNamespacesEnabled flags = testBit flags explicitNamespacesBit
-lambdaCaseEnabled :: Int -> Bool
-lambdaCaseEnabled flags = testBit flags lambdaCaseBit
-negativeLiteralsEnabled :: Int -> Bool
-negativeLiteralsEnabled flags = testBit flags negativeLiteralsBit
-patternSynonymsEnabled :: Int -> Bool
-patternSynonymsEnabled flags = testBit flags patternSynonymsBit
+parrEnabled :: ExtsBitmap -> Bool
+parrEnabled = xtest ParrBit
+arrowsEnabled :: ExtsBitmap -> Bool
+arrowsEnabled = xtest ArrowsBit
+thEnabled :: ExtsBitmap -> Bool
+thEnabled = xtest ThBit
+ipEnabled :: ExtsBitmap -> Bool
+ipEnabled = xtest IpBit
+explicitForallEnabled :: ExtsBitmap -> Bool
+explicitForallEnabled = xtest ExplicitForallBit
+bangPatEnabled :: ExtsBitmap -> Bool
+bangPatEnabled = xtest BangPatBit
+haddockEnabled :: ExtsBitmap -> Bool
+haddockEnabled = xtest HaddockBit
+magicHashEnabled :: ExtsBitmap -> Bool
+magicHashEnabled = xtest MagicHashBit
+-- kindSigsEnabled :: ExtsBitmap -> Bool
+-- kindSigsEnabled = xtest KindSigsBit
+unicodeSyntaxEnabled :: ExtsBitmap -> Bool
+unicodeSyntaxEnabled = xtest UnicodeSyntaxBit
+unboxedTuplesEnabled :: ExtsBitmap -> Bool
+unboxedTuplesEnabled = xtest UnboxedTuplesBit
+datatypeContextsEnabled :: ExtsBitmap -> Bool
+datatypeContextsEnabled = xtest DatatypeContextsBit
+qqEnabled :: ExtsBitmap -> Bool
+qqEnabled = xtest QqBit
+inRulePrag :: ExtsBitmap -> Bool
+inRulePrag = xtest InRulePragBit
+rawTokenStreamEnabled :: ExtsBitmap -> Bool
+rawTokenStreamEnabled = xtest RawTokenStreamBit
+alternativeLayoutRule :: ExtsBitmap -> Bool
+alternativeLayoutRule = xtest AlternativeLayoutRuleBit
+hpcEnabled :: ExtsBitmap -> Bool
+hpcEnabled = xtest HpcBit
+relaxedLayout :: ExtsBitmap -> Bool
+relaxedLayout = xtest RelaxedLayoutBit
+nondecreasingIndentation :: ExtsBitmap -> Bool
+nondecreasingIndentation = xtest NondecreasingIndentationBit
+sccProfilingOn :: ExtsBitmap -> Bool
+sccProfilingOn = xtest SccProfilingOnBit
+traditionalRecordSyntaxEnabled :: ExtsBitmap -> Bool
+traditionalRecordSyntaxEnabled = xtest TraditionalRecordSyntaxBit
+typeLiteralsEnabled :: ExtsBitmap -> Bool
+typeLiteralsEnabled = xtest TypeLiteralsBit
+
+explicitNamespacesEnabled :: ExtsBitmap -> Bool
+explicitNamespacesEnabled = xtest ExplicitNamespacesBit
+lambdaCaseEnabled :: ExtsBitmap -> Bool
+lambdaCaseEnabled = xtest LambdaCaseBit
+binaryLiteralsEnabled :: ExtsBitmap -> Bool
+binaryLiteralsEnabled = xtest BinaryLiteralsBit
+negativeLiteralsEnabled :: ExtsBitmap -> Bool
+negativeLiteralsEnabled = xtest NegativeLiteralsBit
+patternSynonymsEnabled :: ExtsBitmap -> Bool
+patternSynonymsEnabled = xtest PatternSynonymsBit
-- PState for parsing options pragmas
--
@@ -1999,7 +1996,7 @@ mkPState flags buf loc =
last_loc = mkRealSrcSpan loc loc,
last_len = 0,
loc = loc,
- extsBitmap = fromIntegral bitmap,
+ extsBitmap = bitmap,
context = [],
lex_state = [bol, 0],
srcfiles = [],
@@ -2011,41 +2008,42 @@ mkPState flags buf loc =
alr_justClosedExplicitLetBlock = False
}
where
- bitmap = ffiBit `setBitIf` xopt Opt_ForeignFunctionInterface flags
- .|. interruptibleFfiBit `setBitIf` xopt Opt_InterruptibleFFI flags
- .|. cApiFfiBit `setBitIf` xopt Opt_CApiFFI flags
- .|. parrBit `setBitIf` xopt Opt_ParallelArrays flags
- .|. arrowsBit `setBitIf` xopt Opt_Arrows flags
- .|. thBit `setBitIf` xopt Opt_TemplateHaskell flags
- .|. qqBit `setBitIf` xopt Opt_QuasiQuotes flags
- .|. ipBit `setBitIf` xopt Opt_ImplicitParams flags
- .|. explicitForallBit `setBitIf` xopt Opt_ExplicitForAll flags
- .|. bangPatBit `setBitIf` xopt Opt_BangPatterns flags
- .|. haddockBit `setBitIf` gopt Opt_Haddock flags
- .|. magicHashBit `setBitIf` xopt Opt_MagicHash flags
- .|. kindSigsBit `setBitIf` xopt Opt_KindSignatures flags
- .|. recursiveDoBit `setBitIf` xopt Opt_RecursiveDo flags
- .|. unicodeSyntaxBit `setBitIf` xopt Opt_UnicodeSyntax flags
- .|. unboxedTuplesBit `setBitIf` xopt Opt_UnboxedTuples flags
- .|. datatypeContextsBit `setBitIf` xopt Opt_DatatypeContexts flags
- .|. transformComprehensionsBit `setBitIf` xopt Opt_TransformListComp flags
- .|. transformComprehensionsBit `setBitIf` xopt Opt_MonadComprehensions flags
- .|. rawTokenStreamBit `setBitIf` gopt Opt_KeepRawTokenStream flags
- .|. hpcBit `setBitIf` gopt Opt_Hpc flags
- .|. alternativeLayoutRuleBit `setBitIf` xopt Opt_AlternativeLayoutRule flags
- .|. relaxedLayoutBit `setBitIf` xopt Opt_RelaxedLayout flags
- .|. sccProfilingOnBit `setBitIf` gopt Opt_SccProfilingOn flags
- .|. nondecreasingIndentationBit `setBitIf` xopt Opt_NondecreasingIndentation flags
- .|. safeHaskellBit `setBitIf` safeImportsOn flags
- .|. traditionalRecordSyntaxBit `setBitIf` xopt Opt_TraditionalRecordSyntax flags
- .|. typeLiteralsBit `setBitIf` xopt Opt_DataKinds flags
- .|. explicitNamespacesBit `setBitIf` xopt Opt_ExplicitNamespaces flags
- .|. lambdaCaseBit `setBitIf` xopt Opt_LambdaCase flags
- .|. negativeLiteralsBit `setBitIf` xopt Opt_NegativeLiterals flags
- .|. patternSynonymsBit `setBitIf` xopt Opt_PatternSynonyms flags
+ bitmap = FfiBit `setBitIf` xopt Opt_ForeignFunctionInterface flags
+ .|. InterruptibleFfiBit `setBitIf` xopt Opt_InterruptibleFFI flags
+ .|. CApiFfiBit `setBitIf` xopt Opt_CApiFFI flags
+ .|. ParrBit `setBitIf` xopt Opt_ParallelArrays flags
+ .|. ArrowsBit `setBitIf` xopt Opt_Arrows flags
+ .|. ThBit `setBitIf` xopt Opt_TemplateHaskell flags
+ .|. QqBit `setBitIf` xopt Opt_QuasiQuotes flags
+ .|. IpBit `setBitIf` xopt Opt_ImplicitParams flags
+ .|. ExplicitForallBit `setBitIf` xopt Opt_ExplicitForAll flags
+ .|. BangPatBit `setBitIf` xopt Opt_BangPatterns flags
+ .|. HaddockBit `setBitIf` gopt Opt_Haddock flags
+ .|. MagicHashBit `setBitIf` xopt Opt_MagicHash flags
+ .|. KindSigsBit `setBitIf` xopt Opt_KindSignatures flags
+ .|. RecursiveDoBit `setBitIf` xopt Opt_RecursiveDo flags
+ .|. UnicodeSyntaxBit `setBitIf` xopt Opt_UnicodeSyntax flags
+ .|. UnboxedTuplesBit `setBitIf` xopt Opt_UnboxedTuples flags
+ .|. DatatypeContextsBit `setBitIf` xopt Opt_DatatypeContexts flags
+ .|. TransformComprehensionsBit `setBitIf` xopt Opt_TransformListComp flags
+ .|. TransformComprehensionsBit `setBitIf` xopt Opt_MonadComprehensions flags
+ .|. RawTokenStreamBit `setBitIf` gopt Opt_KeepRawTokenStream flags
+ .|. HpcBit `setBitIf` gopt Opt_Hpc flags
+ .|. AlternativeLayoutRuleBit `setBitIf` xopt Opt_AlternativeLayoutRule flags
+ .|. RelaxedLayoutBit `setBitIf` xopt Opt_RelaxedLayout flags
+ .|. SccProfilingOnBit `setBitIf` gopt Opt_SccProfilingOn flags
+ .|. NondecreasingIndentationBit `setBitIf` xopt Opt_NondecreasingIndentation flags
+ .|. SafeHaskellBit `setBitIf` safeImportsOn flags
+ .|. TraditionalRecordSyntaxBit `setBitIf` xopt Opt_TraditionalRecordSyntax flags
+ .|. TypeLiteralsBit `setBitIf` xopt Opt_DataKinds flags
+ .|. ExplicitNamespacesBit `setBitIf` xopt Opt_ExplicitNamespaces flags
+ .|. LambdaCaseBit `setBitIf` xopt Opt_LambdaCase flags
+ .|. BinaryLiteralsBit `setBitIf` xopt Opt_BinaryLiterals flags
+ .|. NegativeLiteralsBit `setBitIf` xopt Opt_NegativeLiterals flags
+ .|. PatternSynonymsBit `setBitIf` xopt Opt_PatternSynonyms flags
--
- setBitIf :: Int -> Bool -> Int
- b `setBitIf` cond | cond = bit b
+ setBitIf :: ExtBits -> Bool -> ExtsBitmap
+ b `setBitIf` cond | cond = xbit b
| otherwise = 0
addWarning :: WarningFlag -> SrcSpan -> SDoc -> P ()
@@ -2434,6 +2432,10 @@ oneWordPrags = Map.fromList([("rules", rulePrag),
("vectorize", token ITvect_prag),
("novectorize", token ITnovect_prag),
("minimal", token ITminimal_prag),
+ ("overlaps", token IToverlaps_prag),
+ ("overlappable", token IToverlappable_prag),
+ ("overlapping", token IToverlapping_prag),
+ ("incoherent", token ITincoherent_prag),
("ctype", token ITctype)])
twoWordPrags = Map.fromList([("inline conlike", token (ITinline_prag Inline ConLike)),
@@ -2447,7 +2449,7 @@ dispatch_pragmas prags span buf len = case Map.lookup (clean_pragma (lexemeToStr
Just found -> found span buf len
Nothing -> lexError "unknown pragma"
-known_pragma :: Map String Action -> AlexAccPred Int
+known_pragma :: Map String Action -> AlexAccPred ExtsBitmap
known_pragma prags _ (AI _ startbuf) _ (AI _ curbuf)
= isKnown && nextCharIsNot curbuf pragmaNameChar
where l = lexemeToString startbuf (byteDiff startbuf curbuf)
diff --git a/compiler/parser/Parser.y.pp b/compiler/parser/Parser.y.pp
index 1715f6cc2f6d..72dfc88fa610 100644
--- a/compiler/parser/Parser.y.pp
+++ b/compiler/parser/Parser.y.pp
@@ -16,8 +16,25 @@
-- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
-- for details
-module Parser ( parseModule, parseStmt, parseIdentifier, parseType,
- parseHeader ) where
+-- | This module provides the generated Happy parser for Haskell. It exports
+-- a number of parsers which may be used in any library that uses the GHC API.
+-- A common usage pattern is to initialize the parser state with a given string
+-- and then parse that string:
+--
+-- @
+-- runParser :: DynFlags -> String -> P a -> ParseResult a
+-- runParser flags str parser = unP parser parseState
+-- where
+-- filename = "\<interactive\>"
+-- location = mkRealSrcLoc (mkFastString filename) 1 1
+-- buffer = stringToStringBuffer str
+-- parseState = mkPState flags buffer location in
+-- @
+module Parser (parseModule, parseImport, parseStatement,
+ parseDeclaration, parseExpression, parseTypeSignature,
+ parseFullStmt, parseStmt, parseIdentifier,
+ parseType, parseHeader) where
+
import HsSyn
import RdrHsSyn
@@ -269,6 +286,10 @@
'{-# NOVECTORISE' { L _ ITnovect_prag }
'{-# MINIMAL' { L _ ITminimal_prag }
'{-# CTYPE' { L _ ITctype }
+ '{-# OVERLAPPING' { L _ IToverlapping_prag }
+ '{-# OVERLAPPABLE' { L _ IToverlappable_prag }
+ '{-# OVERLAPS' { L _ IToverlaps_prag }
+ '{-# INCOHERENT' { L _ ITincoherent_prag }
'#-}' { L _ ITclose_prag }
'..' { L _ ITdotdot } -- reserved symbols
@@ -360,12 +381,20 @@
%monad { P } { >>= } { return }
%lexer { lexer } { L _ ITeof }
+%tokentype { (Located Token) }
+
+-- Exported parsers
%name parseModule module
+%name parseImport importdecl
+%name parseStatement stmt
+%name parseDeclaration topdecl
+%name parseExpression exp
+%name parseTypeSignature sigdecl
+%name parseFullStmt stmt
%name parseStmt maybe_stmt
%name parseIdentifier identifier
%name parseType ctype
%partial parseHeader header
-%tokentype { (Located Token) }
%%
-----------------------------------------------------------------------------
@@ -654,12 +683,13 @@
{% mkFamDecl (comb3 $1 $2 $4) DataFamily $3 (unLoc $4) }
inst_decl :: { LInstDecl RdrName }
- : 'instance' inst_type where_inst
- { let (binds, sigs, _, ats, adts, _) = cvBindsAndSigs (unLoc $3) in
- let cid = ClsInstDecl { cid_poly_ty = $2, cid_binds = binds
+ : 'instance' overlap_pragma inst_type where_inst
+ { let (binds, sigs, _, ats, adts, _) = cvBindsAndSigs (unLoc $4) in
+ let cid = ClsInstDecl { cid_poly_ty = $3, cid_binds = binds
, cid_sigs = sigs, cid_tyfam_insts = ats
+ , cid_overlap_mode = $2
, cid_datafam_insts = adts }
- in L (comb3 $1 $2 $3) (ClsInstD { cid_inst = cid }) }
+ in L (comb3 $1 $3 $4) (ClsInstD { cid_inst = cid }) }
-- type instance declarations
| 'type' 'instance' ty_fam_inst_eqn
@@ -677,6 +707,14 @@
{% mkDataFamInst (comb4 $1 $4 $6 $7) (unLoc $1) $3 $4
(unLoc $5) (unLoc $6) (unLoc $7) }
+overlap_pragma :: { Maybe OverlapMode }
+ : '{-# OVERLAPPABLE' '#-}' { Just Overlappable }
+ | '{-# OVERLAPPING' '#-}' { Just Overlapping }
+ | '{-# OVERLAPS' '#-}' { Just Overlaps }
+ | '{-# INCOHERENT' '#-}' { Just Incoherent }
+ | {- empty -} { Nothing }
+
+
-- Closed type families
where_type_family :: { Located (FamilyInfo RdrName) }
@@ -783,7 +821,7 @@
-- Glasgow extension: stand-alone deriving declarations
stand_alone_deriving :: { LDerivDecl RdrName }
- : 'deriving' 'instance' inst_type { LL (DerivDecl $3) }
+ : 'deriving' 'instance' overlap_pragma inst_type { LL (DerivDecl $4 $3) }
-----------------------------------------------------------------------------
-- Role annotations
@@ -810,17 +848,29 @@
-- Glasgow extension: pattern synonyms
pattern_synonym_decl :: { LHsDecl RdrName }
- : 'pattern' con vars0 patsyn_token pat { LL . ValD $ mkPatSynBind $2 (PrefixPatSyn $3) $5 $4 }
- | 'pattern' varid conop varid patsyn_token pat { LL . ValD $ mkPatSynBind $3 (InfixPatSyn $2 $4) $6 $5 }
+ : 'pattern' pat '=' pat
+ {% do { (name, args) <- splitPatSyn $2
+ ; return $ LL . ValD $ mkPatSynBind name args $4 ImplicitBidirectional
+ }}
+ | 'pattern' pat '<-' pat
+ {% do { (name, args) <- splitPatSyn $2
+ ; return $ LL . ValD $ mkPatSynBind name args $4 Unidirectional
+ }}
+ | 'pattern' pat '<-' pat where_decls
+ {% do { (name, args) <- splitPatSyn $2
+ ; mg <- toPatSynMatchGroup name $5
+ ; return $ LL . ValD $
+ mkPatSynBind name args $4 (ExplicitBidirectional mg)
+ }}
+
+where_decls :: { Located (OrdList (LHsDecl RdrName)) }
+ : 'where' '{' decls '}' { $3 }
+ | 'where' vocurly decls close { $3 }
vars0 :: { [Located RdrName] }
: {- empty -} { [] }
| varid vars0 { $1 : $2 }
-patsyn_token :: { HsPatSynDir RdrName }
- : '<-' { Unidirectional }
- | '=' { ImplicitBidirectional }
-
-----------------------------------------------------------------------------
-- Nested declarations
@@ -1041,7 +1091,7 @@
: ctypedoc { L1 (mkImplicitHsForAllTy (noLoc []) $1) }
-- Wrap an Implicit forall if there isn't one there already
-sig_vars :: { Located [Located RdrName] }
+sig_vars :: { Located [Located RdrName] } -- Returned in reversed order
: sig_vars ',' var { LL ($3 : unLoc $1) }
| var { L1 [$1] }
@@ -1151,10 +1201,11 @@
| TH_ID_SPLICE { LL $ mkHsSpliceTy $ L1 $ HsVar $
mkUnqual varName (getTH_ID_SPLICE $1) }
-- see Note [Promotion] for the followings
- | SIMPLEQUOTE qconid { LL $ HsTyVar $ unLoc $2 }
- | SIMPLEQUOTE '(' ')' { LL $ HsTyVar $ getRdrName unitDataCon }
+ | SIMPLEQUOTE qcon { LL $ HsTyVar $ unLoc $2 }
| SIMPLEQUOTE '(' ctype ',' comma_types1 ')' { LL $ HsExplicitTupleTy [] ($3 : $5) }
| SIMPLEQUOTE '[' comma_types0 ']' { LL $ HsExplicitListTy placeHolderKind $3 }
+ | SIMPLEQUOTE var { LL $ HsTyVar $ unLoc $2 }
+
| '[' ctype ',' comma_types1 ']' { LL $ HsExplicitListTy placeHolderKind ($2 : $4) }
| INTEGER {% mkTyLit $ LL $ HsNumTy $ getINTEGER $1 }
| STRING {% mkTyLit $ LL $ HsStrTy $ getSTRING $1 }
@@ -1422,7 +1473,7 @@
{% do s <- checkValSig $1 $3
; return (LL $ unitOL (LL $ SigD s)) }
| var ',' sig_vars '::' sigtypedoc
- { LL $ toOL [ LL $ SigD (TypeSig ($1 : unLoc $3) $5) ] }
+ { LL $ toOL [ LL $ SigD (TypeSig ($1 : reverse (unLoc $3)) $5) ] }
| infix prec ops { LL $ toOL [ LL $ SigD (FixSig (FixitySig n (Fixity $2 (unLoc $1))))
| n <- unLoc $3 ] }
| '{-# INLINE' activation qvar '#-}'
@@ -1475,18 +1526,18 @@
exp10 :: { LHsExpr RdrName }
: '\\' apat apats opt_asig '->' exp
- { LL $ HsLam (mkMatchGroup [LL $ Match ($2:$3) $4
+ { LL $ HsLam (mkMatchGroup FromSource [LL $ Match ($2:$3) $4
(unguardedGRHSs $6)
- ]) }
+ ]) }
| 'let' binds 'in' exp { LL $ HsLet (unLoc $2) $4 }
| '\\' 'lcase' altslist
- { LL $ HsLamCase placeHolderType (mkMatchGroup (unLoc $3)) }
+ { LL $ HsLamCase placeHolderType (mkMatchGroup FromSource (unLoc $3)) }
| 'if' exp optSemi 'then' exp optSemi 'else' exp
{% checkDoAndIfThenElse $2 $3 $5 $6 $8 >>
return (LL $ mkHsIf $2 $5 $8) }
| 'if' ifgdpats {% hintMultiWayIf (getLoc $1) >>
return (LL $ HsMultiIf placeHolderType (reverse $ unLoc $2)) }
- | 'case' exp 'of' altslist { LL $ HsCase $2 (mkMatchGroup (unLoc $4)) }
+ | 'case' exp 'of' altslist { LL $ HsCase $2 (mkMatchGroup FromSource (unLoc $4)) }
| '-' fexp { LL $ NegApp $2 noSyntaxExpr }
| 'do' stmtlist { L (comb2 $1 $2) (mkHsDo DoExpr (unLoc $2)) }
diff --git a/compiler/parser/ParserCore.y b/compiler/parser/ParserCore.y
deleted file mode 100644
index 4e7f48c6fce6..000000000000
--- a/compiler/parser/ParserCore.y
+++ /dev/null
@@ -1,397 +0,0 @@
-{
-{-# LANGUAGE BangPatterns #-} -- required for versions of Happy before 1.18.6
-{-# OPTIONS -Wwarn -w #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and fix
--- any warnings in the module. See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
--- for details
-
-module ParserCore ( parseCore ) where
-
-import IfaceSyn
-import ForeignCall
-import RdrHsSyn
-import HsSyn hiding (toHsType, toHsKind)
-import RdrName
-import OccName
-import TypeRep ( TyThing(..) )
-import Type ( Kind,
- liftedTypeKindTyCon, openTypeKindTyCon, unliftedTypeKindTyCon,
- mkTyConApp
- )
-import Kind( mkArrowKind )
-import Name( Name, nameOccName, nameModule, mkExternalName, wiredInNameTyThing_maybe )
-import Module
-import ParserCoreUtils
-import LexCore
-import Literal
-import SrcLoc
-import PrelNames
-import TysPrim
-import TyCon ( TyCon, tyConName )
-import FastString
-import Outputable
-import Data.Char
-import Unique
-
-#include "../HsVersions.h"
-
-}
-
-%name parseCore
-%expect 0
-%tokentype { Token }
-
-%token
- '%module' { TKmodule }
- '%data' { TKdata }
- '%newtype' { TKnewtype }
- '%forall' { TKforall }
- '%rec' { TKrec }
- '%let' { TKlet }
- '%in' { TKin }
- '%case' { TKcase }
- '%of' { TKof }
- '%cast' { TKcast }
- '%note' { TKnote }
- '%external' { TKexternal }
- '%local' { TKlocal }
- '%_' { TKwild }
- '(' { TKoparen }
- ')' { TKcparen }
- '{' { TKobrace }
- '}' { TKcbrace }
- '#' { TKhash}
- '=' { TKeq }
- ':' { TKcolon }
- '::' { TKcoloncolon }
- ':=:' { TKcoloneqcolon }
- '*' { TKstar }
- '->' { TKrarrow }
- '\\' { TKlambda}
- '@' { TKat }
- '.' { TKdot }
- '?' { TKquestion}
- ';' { TKsemicolon }
- NAME { TKname $$ }
- CNAME { TKcname $$ }
- INTEGER { TKinteger $$ }
- RATIONAL { TKrational $$ }
- STRING { TKstring $$ }
- CHAR { TKchar $$ }
-
-%monad { P } { thenP } { returnP }
-%lexer { lexer } { TKEOF }
-
-%%
-
-module :: { HsExtCore RdrName }
- -- : '%module' modid tdefs vdefgs { HsExtCore $2 $3 $4 }
- : '%module' modid tdefs vdefgs { HsExtCore $2 [] [] }
-
-
--------------------------------------------------------------
--- Names: the trickiest bit in here
-
--- A name of the form A.B.C could be:
--- module A.B.C
--- dcon C in module A.B
--- tcon C in module A.B
-modid :: { Module }
- : NAME ':' mparts { undefined }
-
-q_dc_name :: { Name }
- : NAME ':' mparts { undefined }
-
-q_tc_name :: { Name }
- : NAME ':' mparts { undefined }
-
-q_var_occ :: { Name }
- : NAME ':' vparts { undefined }
-
-mparts :: { [String] }
- : CNAME { [$1] }
- | CNAME '.' mparts { $1:$3 }
-
-vparts :: { [String] }
- : var_occ { [$1] }
- | CNAME '.' vparts { $1:$3 }
-
--------------------------------------------------------------
--- Type and newtype declarations are in HsSyn syntax
-
-tdefs :: { [TyClDecl RdrName] }
- : {- empty -} {[]}
- | tdef tdefs {$1:$2}
-
-tdef :: { TyClDecl RdrName }
- : '%data' q_tc_name tv_bndrs '=' '{' cons '}' ';'
- { DataDecl { tcdLName = noLoc (ifaceExtRdrName $2)
- , tcdTyVars = mkHsQTvs (map toHsTvBndr $3)
- , tcdDataDefn = HsDataDefn { dd_ND = DataType, dd_ctxt = noLoc []
- , dd_kindSig = Nothing
- , dd_cons = $6, dd_derivs = Nothing } } }
- | '%newtype' q_tc_name tv_bndrs trep ';'
- { let tc_rdr = ifaceExtRdrName $2 in
- DataDecl { tcdLName = noLoc tc_rdr
- , tcdTyVars = mkHsQTvs (map toHsTvBndr $3)
- , tcdDataDefn = HsDataDefn { dd_ND = NewType, dd_ctxt = noLoc []
- , dd_kindSig = Nothing
- , dd_cons = $4 (rdrNameOcc tc_rdr), dd_derivs = Nothing } } }
-
--- For a newtype we have to invent a fake data constructor name
--- It doesn't matter what it is, because it won't be used
-trep :: { OccName -> [LConDecl RdrName] }
- : {- empty -} { (\ tc_occ -> []) }
- | '=' ty { (\ tc_occ -> let { dc_name = mkRdrUnqual (setOccNameSpace dataName tc_occ) ;
- con_info = PrefixCon [toHsType $2] }
- in [noLoc $ mkSimpleConDecl (noLoc dc_name) []
- (noLoc []) con_info]) }
-
-cons :: { [LConDecl RdrName] }
- : {- empty -} { [] } -- 20060420 Empty data types allowed. jds
- | con { [$1] }
- | con ';' cons { $1:$3 }
-
-con :: { LConDecl RdrName }
- : d_pat_occ attv_bndrs hs_atys
- { noLoc $ mkSimpleConDecl (noLoc (mkRdrUnqual $1)) $2 (noLoc []) (PrefixCon $3) }
--- ToDo: parse record-style declarations
-
-attv_bndrs :: { [LHsTyVarBndr RdrName] }
- : {- empty -} { [] }
- | '@' tv_bndr attv_bndrs { toHsTvBndr $2 : $3 }
-
-hs_atys :: { [LHsType RdrName] }
- : atys { map toHsType $1 }
-
-
----------------------------------------
--- Types
----------------------------------------
-
-atys :: { [IfaceType] }
- : {- empty -} { [] }
- | aty atys { $1:$2 }
-
-aty :: { IfaceType }
- : fs_var_occ { IfaceTyVar $1 }
- | q_tc_name { IfaceTyConApp (IfaceTc $1) [] }
- | '(' ty ')' { $2 }
-
-bty :: { IfaceType }
- : fs_var_occ atys { foldl IfaceAppTy (IfaceTyVar $1) $2 }
- | q_var_occ atys { undefined }
- | q_tc_name atys { IfaceTyConApp (IfaceTc $1) $2 }
- | '(' ty ')' { $2 }
-
-ty :: { IfaceType }
- : bty { $1 }
- | bty '->' ty { IfaceFunTy $1 $3 }
- | '%forall' tv_bndrs '.' ty { foldr IfaceForAllTy $4 $2 }
-
-----------------------------------------------
--- Bindings are in Iface syntax
-
-vdefgs :: { [IfaceBinding] }
- : {- empty -} { [] }
- | let_bind ';' vdefgs { $1 : $3 }
-
-let_bind :: { IfaceBinding }
- : '%rec' '{' vdefs1 '}' { IfaceRec $3 } -- Can be empty. Do we care?
- | vdef { let (b,r) = $1
- in IfaceNonRec b r }
-
-vdefs1 :: { [(IfaceLetBndr, IfaceExpr)] }
- : vdef { [$1] }
- | vdef ';' vdefs1 { $1:$3 }
-
-vdef :: { (IfaceLetBndr, IfaceExpr) }
- : fs_var_occ '::' ty '=' exp { (IfLetBndr $1 $3 NoInfo, $5) }
- | '%local' vdef { $2 }
-
- -- NB: qd_occ includes data constructors, because
- -- we allow data-constructor wrappers at top level
- -- But we discard the module name, because it must be the
- -- same as the module being compiled, and Iface syntax only
- -- has OccNames in binding positions. Ah, but it has Names now!
-
----------------------------------------
--- Binders
-bndr :: { IfaceBndr }
- : '@' tv_bndr { IfaceTvBndr $2 }
- | id_bndr { IfaceIdBndr $1 }
-
-bndrs :: { [IfaceBndr] }
- : bndr { [$1] }
- | bndr bndrs { $1:$2 }
-
-id_bndr :: { IfaceIdBndr }
- : '(' fs_var_occ '::' ty ')' { ($2,$4) }
-
-tv_bndr :: { IfaceTvBndr }
- : fs_var_occ { ($1, ifaceLiftedTypeKind) }
- | '(' fs_var_occ '::' akind ')' { ($2, $4) }
-
-tv_bndrs :: { [IfaceTvBndr] }
- : {- empty -} { [] }
- | tv_bndr tv_bndrs { $1:$2 }
-
-akind :: { IfaceKind }
- : '*' { ifaceLiftedTypeKind }
- | '#' { ifaceUnliftedTypeKind }
- | '?' { ifaceOpenTypeKind }
- | '(' kind ')' { $2 }
-
-kind :: { IfaceKind }
- : akind { $1 }
- | akind '->' kind { ifaceArrow $1 $3 }
-
------------------------------------------
--- Expressions
-
-aexp :: { IfaceExpr }
- : fs_var_occ { IfaceLcl $1 }
- | q_var_occ { IfaceExt $1 }
- | q_dc_name { IfaceExt $1 }
- | lit { IfaceLit $1 }
- | '(' exp ')' { $2 }
-
-fexp :: { IfaceExpr }
- : fexp aexp { IfaceApp $1 $2 }
- | fexp '@' aty { IfaceApp $1 (IfaceType $3) }
- | aexp { $1 }
-
-exp :: { IfaceExpr }
- : fexp { $1 }
- | '\\' bndrs '->' exp { foldr IfaceLam $4 $2 }
- | '%let' let_bind '%in' exp { IfaceLet $2 $4 }
--- gaw 2004
- | '%case' '(' ty ')' aexp '%of' id_bndr
- '{' alts1 '}' { IfaceCase $5 (fst $7) $9 }
--- The following line is broken and is hard to fix. Not fixing now
--- because this whole parser is bitrotten anyway.
--- Richard Eisenberg, July 2013
--- | '%cast' aexp aty { IfaceCast $2 $3 }
--- No InlineMe any more
--- | '%note' STRING exp
--- { case $2 of
--- --"SCC" -> IfaceNote (IfaceSCC "scc") $3
--- "InlineMe" -> IfaceNote IfaceInlineMe $3
--- }
- | '%external' STRING aty { IfaceFCall (ForeignCall.CCall
- (CCallSpec (StaticTarget (mkFastString $2) Nothing True)
- CCallConv PlaySafe))
- $3 }
-
-alts1 :: { [IfaceAlt] }
- : alt { [$1] }
- | alt ';' alts1 { $1:$3 }
-
-alt :: { IfaceAlt }
- : q_dc_name bndrs '->' exp
- { (IfaceDataAlt $1, map ifaceBndrName $2, $4) }
- -- The external syntax currently includes the types of the
- -- the args, but they aren't needed internally
- -- Nor is the module qualifier
- | q_dc_name '->' exp
- { (IfaceDataAlt $1, [], $3) }
- | lit '->' exp
- { (IfaceLitAlt $1, [], $3) }
- | '%_' '->' exp
- { (IfaceDefault, [], $3) }
-
-lit :: { Literal }
- : '(' INTEGER '::' aty ')' { convIntLit $2 $4 }
- | '(' RATIONAL '::' aty ')' { convRatLit $2 $4 }
- | '(' CHAR '::' aty ')' { MachChar $2 }
- | '(' STRING '::' aty ')' { MachStr (fastStringToByteString (mkFastString $2)) }
-
-fs_var_occ :: { FastString }
- : NAME { mkFastString $1 }
-
-var_occ :: { String }
- : NAME { $1 }
-
-
--- Data constructor in a pattern or data type declaration; use the dataName,
--- because that's what we expect in Core case patterns
-d_pat_occ :: { OccName }
- : CNAME { mkOccName dataName $1 }
-
-{
-
-ifaceKind kc = IfaceTyConApp kc []
-
-ifaceBndrName (IfaceIdBndr (n,_)) = n
-ifaceBndrName (IfaceTvBndr (n,_)) = n
-
-convIntLit :: Integer -> IfaceType -> Literal
-convIntLit i (IfaceTyConApp tc [])
- | tc `eqTc` intPrimTyCon = MachInt i
- | tc `eqTc` wordPrimTyCon = MachWord i
- | tc `eqTc` charPrimTyCon = MachChar (chr (fromInteger i))
- | tc `eqTc` addrPrimTyCon && i == 0 = MachNullAddr
-convIntLit i aty
- = pprPanic "Unknown integer literal type" (ppr aty)
-
-convRatLit :: Rational -> IfaceType -> Literal
-convRatLit r (IfaceTyConApp tc [])
- | tc `eqTc` floatPrimTyCon = MachFloat r
- | tc `eqTc` doublePrimTyCon = MachDouble r
-convRatLit i aty
- = pprPanic "Unknown rational literal type" (ppr aty)
-
-eqTc :: IfaceTyCon -> TyCon -> Bool -- Ugh!
-eqTc (IfaceTc name) tycon = name == tyConName tycon
-
--- Tiresomely, we have to generate both HsTypes (in type/class decls)
--- and IfaceTypes (in Core expressions). So we parse them as IfaceTypes,
--- and convert to HsTypes here. But the IfaceTypes we can see here
--- are very limited (see the productions for 'ty'), so the translation
--- isn't hard
-toHsType :: IfaceType -> LHsType RdrName
-toHsType (IfaceTyVar v) = noLoc $ HsTyVar (mkRdrUnqual (mkTyVarOccFS v))
-toHsType (IfaceAppTy t1 t2) = noLoc $ HsAppTy (toHsType t1) (toHsType t2)
-toHsType (IfaceFunTy t1 t2) = noLoc $ HsFunTy (toHsType t1) (toHsType t2)
-toHsType (IfaceTyConApp (IfaceTc tc) ts) = foldl mkHsAppTy (noLoc $ HsTyVar (ifaceExtRdrName tc)) (map toHsType ts)
-toHsType (IfaceForAllTy tv t) = add_forall (toHsTvBndr tv) (toHsType t)
-
--- Only a limited form of kind will be encountered... hopefully
-toHsKind :: IfaceKind -> LHsKind RdrName
--- IA0_NOTE: Shouldn't we add kind variables?
-toHsKind (IfaceFunTy ifK1 ifK2) = noLoc $ HsFunTy (toHsKind ifK1) (toHsKind ifK2)
-toHsKind (IfaceTyConApp ifKc []) = noLoc $ HsTyVar (nameRdrName (tyConName (toKindTc ifKc)))
-toHsKind other = pprPanic "toHsKind" (ppr other)
-
-toKindTc :: IfaceTyCon -> TyCon
-toKindTc (IfaceTc n) | Just (ATyCon tc) <- wiredInNameTyThing_maybe n = tc
-toKindTc other = pprPanic "toKindTc" (ppr other)
-
-ifaceTcType ifTc = IfaceTyConApp ifTc []
-
-ifaceLiftedTypeKind = ifaceTcType (IfaceTc liftedTypeKindTyConName)
-ifaceOpenTypeKind = ifaceTcType (IfaceTc openTypeKindTyConName)
-ifaceUnliftedTypeKind = ifaceTcType (IfaceTc unliftedTypeKindTyConName)
-
-ifaceArrow ifT1 ifT2 = IfaceFunTy ifT1 ifT2
-
-toHsTvBndr :: IfaceTvBndr -> LHsTyVarBndr RdrName
-toHsTvBndr (tv,k) = noLoc $ KindedTyVar (mkRdrUnqual (mkTyVarOccFS tv)) bsig
- where
- bsig = toHsKind k
-
-ifaceExtRdrName :: Name -> RdrName
-ifaceExtRdrName name = mkOrig (nameModule name) (nameOccName name)
-ifaceExtRdrName other = pprPanic "ParserCore.ifaceExtRdrName" (ppr other)
-
-add_forall tv (L _ (HsForAllTy exp tvs cxt t))
- = noLoc $ HsForAllTy exp (mkHsQTvs (tv : hsQTvBndrs tvs)) cxt t
-add_forall tv t
- = noLoc $ HsForAllTy Explicit (mkHsQTvs [tv]) (noLoc []) t
-
-happyError :: P a
-happyError s l = failP (show l ++ ": Parse error\n") (take 100 s) l
-}
-
diff --git a/compiler/parser/ParserCoreUtils.hs b/compiler/parser/ParserCoreUtils.hs
deleted file mode 100644
index 8f67d962396d..000000000000
--- a/compiler/parser/ParserCoreUtils.hs
+++ /dev/null
@@ -1,77 +0,0 @@
-module ParserCoreUtils where
-
-import Exception
-import System.IO
-
-data ParseResult a = OkP a | FailP String
-type P a = String -> Int -> ParseResult a
-
-thenP :: P a -> (a -> P b) -> P b
-m `thenP` k = \ s l ->
- case m s l of
- OkP a -> k a s l
- FailP s -> FailP s
-
-returnP :: a -> P a
-returnP m _ _ = OkP m
-
-failP :: String -> P a
-failP s s' _ = FailP (s ++ ":" ++ s')
-
-getCoreModuleName :: FilePath -> IO String
-getCoreModuleName fpath =
- catchIO (do
- h <- openFile fpath ReadMode
- ls <- hGetContents h
- let mo = findMod (words ls)
- -- make sure we close up the file right away.
- (length mo) `seq` return ()
- hClose h
- return mo)
- (\ _ -> return "Main")
- where
- findMod [] = "Main"
- -- TODO: this should just return the module name, without the package name
- findMod ("%module":m:_) = m
- findMod (_:xs) = findMod xs
-
-
-data Token =
- TKmodule
- | TKdata
- | TKnewtype
- | TKforall
- | TKrec
- | TKlet
- | TKin
- | TKcase
- | TKof
- | TKcast
- | TKnote
- | TKexternal
- | TKlocal
- | TKwild
- | TKoparen
- | TKcparen
- | TKobrace
- | TKcbrace
- | TKhash
- | TKeq
- | TKcolon
- | TKcoloncolon
- | TKcoloneqcolon
- | TKstar
- | TKrarrow
- | TKlambda
- | TKat
- | TKdot
- | TKquestion
- | TKsemicolon
- | TKname String
- | TKcname String
- | TKinteger Integer
- | TKrational Rational
- | TKstring String
- | TKchar Char
- | TKEOF
-
diff --git a/compiler/parser/RdrHsSyn.lhs b/compiler/parser/RdrHsSyn.lhs
index b1e177a3a9cb..84a284f0abe5 100644
--- a/compiler/parser/RdrHsSyn.lhs
+++ b/compiler/parser/RdrHsSyn.lhs
@@ -4,6 +4,8 @@ o%
Functions over HsSyn specialised to RdrName.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module RdrHsSyn (
mkHsOpApp,
mkHsIntegral, mkHsFractional, mkHsIsString,
@@ -15,6 +17,7 @@ module RdrHsSyn (
mkTyFamInst,
mkFamDecl,
splitCon, mkInlinePragma,
+ splitPatSyn, toPatSynMatchGroup,
mkRecConstrOrUpdate, -- HsExp -> [HsFieldUpdate] -> P HsExp
mkTyLit,
mkTyClD, mkInstD,
@@ -32,6 +35,7 @@ module RdrHsSyn (
mkGadtDecl, -- [Located RdrName] -> LHsType RdrName -> ConDecl RdrName
mkSimpleConDecl,
mkDeprecatedGadtRecordDecl,
+ mkATDefault,
-- Bunch of functions in the parser monad for
-- checking and constructing values
@@ -71,7 +75,7 @@ import TysWiredIn ( unitTyCon, unitDataCon )
import ForeignCall
import OccName ( srcDataName, varName, isDataOcc, isTcOcc,
occNameString )
-import PrelNames ( forall_tv_RDR )
+import PrelNames ( forall_tv_RDR, allNameStrings )
import DynFlags
import SrcLoc
import OrdList ( OrdList, fromOL )
@@ -122,16 +126,31 @@ mkClassDecl :: SrcSpan
-> P (LTyClDecl RdrName)
mkClassDecl loc (L _ (mcxt, tycl_hdr)) fds where_cls
- = do { let (binds, sigs, ats, at_defs, _, docs) = cvBindsAndSigs (unLoc where_cls)
+ = do { let (binds, sigs, ats, at_insts, _, docs) = cvBindsAndSigs (unLoc where_cls)
cxt = fromMaybe (noLoc []) mcxt
; (cls, tparams) <- checkTyClHdr tycl_hdr
- ; tyvars <- checkTyVars (ptext (sLit "class")) whereDots
- cls tparams -- Only type vars allowed
+ ; tyvars <- checkTyVarsP (ptext (sLit "class")) whereDots cls tparams
+ ; at_defs <- mapM (eitherToP . mkATDefault) at_insts
; return (L loc (ClassDecl { tcdCtxt = cxt, tcdLName = cls, tcdTyVars = tyvars,
tcdFDs = unLoc fds, tcdSigs = sigs, tcdMeths = binds,
tcdATs = ats, tcdATDefs = at_defs, tcdDocs = docs,
tcdFVs = placeHolderNames })) }
+mkATDefault :: LTyFamInstDecl RdrName
+ -> Either (SrcSpan, SDoc) (LTyFamDefltEqn RdrName)
+-- Take a type-family instance declaration and turn it into
+-- a type-family default equation for a class declaration
+-- We parse things as the former and use this function to convert to the latter
+--
+-- We use the Either monad because this also called
+-- from Convert.hs
+mkATDefault (L loc (TyFamInstDecl { tfid_eqn = L _ e }))
+ | TyFamEqn { tfe_tycon = tc, tfe_pats = pats, tfe_rhs = rhs } <- e
+ = do { tvs <- checkTyVars (ptext (sLit "default")) equalsDots tc (hswb_cts pats)
+ ; return (L loc (TyFamEqn { tfe_tycon = tc
+ , tfe_pats = tvs
+ , tfe_rhs = rhs })) }
+
mkTyData :: SrcSpan
-> NewOrData
-> Maybe CType
@@ -142,7 +161,7 @@ mkTyData :: SrcSpan
-> P (LTyClDecl RdrName)
mkTyData loc new_or_data cType (L _ (mcxt, tycl_hdr)) ksig data_cons maybe_deriv
= do { (tc, tparams) <- checkTyClHdr tycl_hdr
- ; tyvars <- checkTyVars (ppr new_or_data) equalsDots tc tparams
+ ; tyvars <- checkTyVarsP (ppr new_or_data) equalsDots tc tparams
; defn <- mkDataDefn new_or_data cType mcxt ksig data_cons maybe_deriv
; return (L loc (DataDecl { tcdLName = tc, tcdTyVars = tyvars,
tcdDataDefn = defn,
@@ -170,7 +189,7 @@ mkTySynonym :: SrcSpan
-> P (LTyClDecl RdrName)
mkTySynonym loc lhs rhs
= do { (tc, tparams) <- checkTyClHdr lhs
- ; tyvars <- checkTyVars (ptext (sLit "type")) equalsDots tc tparams
+ ; tyvars <- checkTyVarsP (ptext (sLit "type")) equalsDots tc tparams
; return (L loc (SynDecl { tcdLName = tc, tcdTyVars = tyvars
, tcdRhs = rhs, tcdFVs = placeHolderNames })) }
@@ -179,9 +198,9 @@ mkTyFamInstEqn :: LHsType RdrName
-> P (TyFamInstEqn RdrName)
mkTyFamInstEqn lhs rhs
= do { (tc, tparams) <- checkTyClHdr lhs
- ; return (TyFamInstEqn { tfie_tycon = tc
- , tfie_pats = mkHsWithBndrs tparams
- , tfie_rhs = rhs }) }
+ ; return (TyFamEqn { tfe_tycon = tc
+ , tfe_pats = mkHsWithBndrs tparams
+ , tfe_rhs = rhs }) }
mkDataFamInst :: SrcSpan
-> NewOrData
@@ -212,7 +231,7 @@ mkFamDecl :: SrcSpan
-> P (LTyClDecl RdrName)
mkFamDecl loc info lhs ksig
= do { (tc, tparams) <- checkTyClHdr lhs
- ; tyvars <- checkTyVars (ppr info) equals_or_where tc tparams
+ ; tyvars <- checkTyVarsP (ppr info) equals_or_where tc tparams
; return (L loc (FamDecl (FamilyDecl { fdInfo = info, fdLName = tc
, fdTyVars = tyvars, fdKindSig = ksig }))) }
where
@@ -315,7 +334,7 @@ cvBindsAndSigs fb = go (fromOL fb)
go [] = (emptyBag, [], [], [], [], [])
go (L l (SigD s) : ds) = (bs, L l s : ss, ts, tfis, dfis, docs)
where (bs, ss, ts, tfis, dfis, docs) = go ds
- go (L l (ValD b) : ds) = ((FromSource, b') `consBag` bs, ss, ts, tfis, dfis, docs)
+ go (L l (ValD b) : ds) = (b' `consBag` bs, ss, ts, tfis, dfis, docs)
where (b', ds') = getMonoBind (L l b) ds
(bs, ss, ts, tfis, dfis, docs) = go ds'
go (L l (TyClD (FamDecl t)) : ds) = (bs, ss, L l t : ts, tfis, dfis, docs)
@@ -410,6 +429,56 @@ splitCon ty
mk_rest [L _ (HsRecTy flds)] = RecCon flds
mk_rest ts = PrefixCon ts
+splitPatSyn :: LPat RdrName
+ -> P (Located RdrName, HsPatSynDetails (Located RdrName))
+splitPatSyn (L _ (ParPat pat)) = splitPatSyn pat
+splitPatSyn pat@(L loc (ConPatIn con details)) = do
+ details' <- case details of
+ PrefixCon pats -> liftM PrefixPatSyn (mapM patVar pats)
+ InfixCon pat1 pat2 -> liftM2 InfixPatSyn (patVar pat1) (patVar pat2)
+ RecCon{} -> recordPatSynErr loc pat
+ return (con, details')
+ where
+ patVar :: LPat RdrName -> P (Located RdrName)
+ patVar (L loc (VarPat v)) = return $ L loc v
+ patVar (L _ (ParPat pat)) = patVar pat
+ patVar (L loc pat) = parseErrorSDoc loc $
+ text "Pattern synonym arguments must be variable names:" $$
+ ppr pat
+splitPatSyn pat@(L loc _) = parseErrorSDoc loc $
+ text "invalid pattern synonym declaration:" $$ ppr pat
+
+recordPatSynErr :: SrcSpan -> LPat RdrName -> P a
+recordPatSynErr loc pat =
+ parseErrorSDoc loc $
+ text "record syntax not supported for pattern synonym declarations:" $$
+ ppr pat
+
+toPatSynMatchGroup :: Located RdrName -> Located (OrdList (LHsDecl RdrName)) -> P (MatchGroup RdrName (LHsExpr RdrName))
+toPatSynMatchGroup (L _ patsyn_name) (L _ decls) =
+ do { matches <- mapM fromDecl (fromOL decls)
+ ; return $ mkMatchGroup FromSource matches }
+ where
+ fromDecl (L loc decl@(ValD (PatBind pat@(L _ (ConPatIn (L _ name) details)) rhs _ _ _))) =
+ do { unless (name == patsyn_name) $
+ wrongNameBindingErr loc decl
+ ; match <- case details of
+ PrefixCon pats -> return $ Match pats Nothing rhs
+ InfixCon pat1 pat2 -> return $ Match [pat1, pat2] Nothing rhs
+ RecCon{} -> recordPatSynErr loc pat
+ ; return $ L loc match }
+ fromDecl (L loc decl) = extraDeclErr loc decl
+
+ extraDeclErr loc decl =
+ parseErrorSDoc loc $
+ text "pattern synonym 'where' clause must contain a single binding:" $$
+ ppr decl
+
+ wrongNameBindingErr loc decl =
+ parseErrorSDoc loc $
+ text "pattern synonym 'where' clause must bind the pattern synonym's name" <+>
+ quotes (ppr patsyn_name) $$ ppr decl
+
mkDeprecatedGadtRecordDecl :: SrcSpan
-> Located RdrName
-> [ConDeclField RdrName]
@@ -500,26 +569,42 @@ we can bring x,y into scope. So:
* For RecCon we do not
\begin{code}
-checkTyVars :: SDoc -> SDoc -> Located RdrName -> [LHsType RdrName] -> P (LHsTyVarBndrs RdrName)
+checkTyVarsP :: SDoc -> SDoc -> Located RdrName -> [LHsType RdrName] -> P (LHsTyVarBndrs RdrName)
+-- Same as checkTyVars, but in the P monad
+checkTyVarsP pp_what equals_or_where tc tparms
+ = eitherToP $ checkTyVars pp_what equals_or_where tc tparms
+
+eitherToP :: Either (SrcSpan, SDoc) a -> P a
+-- Adapts the Either monad to the P monad
+eitherToP (Left (loc, doc)) = parseErrorSDoc loc doc
+eitherToP (Right thing) = return thing
+checkTyVars :: SDoc -> SDoc -> Located RdrName -> [LHsType RdrName]
+ -> Either (SrcSpan, SDoc) (LHsTyVarBndrs RdrName)
-- Check whether the given list of type parameters are all type variables
--- (possibly with a kind signature).
-checkTyVars pp_what equals_or_where tc tparms = do { tvs <- mapM chk tparms
- ; return (mkHsQTvs tvs) }
+-- (possibly with a kind signature)
+-- We use the Either monad because it's also called (via mkATDefault) from
+-- Convert.hs
+checkTyVars pp_what equals_or_where tc tparms
+ = do { tvs <- mapM chk tparms
+ ; return (mkHsQTvs tvs) }
where
+
-- Check that the name space is correct!
chk (L l (HsKindSig (L _ (HsTyVar tv)) k))
| isRdrTyVar tv = return (L l (KindedTyVar tv k))
chk (L l (HsTyVar tv))
| isRdrTyVar tv = return (L l (UserTyVar tv))
- chk t@(L l _)
- = parseErrorSDoc l $
- vcat [ ptext (sLit "Unexpected type") <+> quotes (ppr t)
- , ptext (sLit "In the") <+> pp_what <+> ptext (sLit "declaration for") <+> quotes (ppr tc)
- , vcat[ (ptext (sLit "A") <+> pp_what <+> ptext (sLit "declaration should have form"))
- , nest 2 (pp_what <+> ppr tc <+> ptext (sLit "a b c")
- <+> equals_or_where) ] ]
+ chk t@(L loc _)
+ = Left (loc,
+ vcat [ ptext (sLit "Unexpected type") <+> quotes (ppr t)
+ , ptext (sLit "In the") <+> pp_what <+> ptext (sLit "declaration for") <+> quotes (ppr tc)
+ , vcat[ (ptext (sLit "A") <+> pp_what <+> ptext (sLit "declaration should have form"))
+ , nest 2 (pp_what <+> ppr tc
+ <+> hsep (map text (takeList tparms allNameStrings))
+ <+> equals_or_where) ] ])
whereDots, equalsDots :: SDoc
+-- Second argument to checkTyVars
whereDots = ptext (sLit "where ...")
equalsDots = ptext (sLit "= ...")
@@ -666,7 +751,7 @@ checkAPat msg loc e0 = do
ExplicitTuple es b
| all tupArgPresent es -> do ps <- mapM (checkLPat msg) [e | Present e <- es]
- return (TuplePat ps b placeHolderType)
+ return (TuplePat ps b [])
| otherwise -> parseErrorSDoc loc (text "Illegal tuple section in pattern:" $$ ppr e0)
RecordCon c _ (HsRecFields fs dd)
@@ -735,7 +820,7 @@ checkFunBind msg lhs_loc fun is_infix pats opt_sig (L rhs_span grhss)
makeFunBind :: Located id -> Bool -> [LMatch id (LHsExpr id)] -> HsBind id
-- Like HsUtils.mkFunBind, but we need to be able to set the fixity too
makeFunBind fn is_infix ms
- = FunBind { fun_id = fn, fun_infix = is_infix, fun_matches = mkMatchGroup ms,
+ = FunBind { fun_id = fn, fun_infix = is_infix, fun_matches = mkMatchGroup FromSource ms,
fun_co_fn = idHsWrapper, bind_fvs = placeHolderNames, fun_tick = Nothing }
checkPatBind :: SDoc
diff --git a/compiler/parser/cutils.c b/compiler/parser/cutils.c
index c42ec9e3cec8..d714a0cb2ae7 100644
--- a/compiler/parser/cutils.c
+++ b/compiler/parser/cutils.c
@@ -37,7 +37,7 @@ ghc_memcmp_off( HsPtr a1, HsInt i, HsPtr a2, HsInt len )
}
void
-enableTimingStats( void ) /* called from the driver */
+enableTimingStats( void ) /* called from the driver */
{
RtsFlags.GcFlags.giveStats = ONELINE_GC_STATS;
}
@@ -47,9 +47,7 @@ setHeapSize( HsInt size )
{
RtsFlags.GcFlags.heapSizeSuggestion = size / BLOCK_SIZE;
if (RtsFlags.GcFlags.maxHeapSize != 0 &&
- RtsFlags.GcFlags.heapSizeSuggestion > RtsFlags.GcFlags.maxHeapSize) {
- RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
+ RtsFlags.GcFlags.heapSizeSuggestion > RtsFlags.GcFlags.maxHeapSize) {
+ RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
}
}
-
-
diff --git a/compiler/prelude/ForeignCall.lhs b/compiler/prelude/ForeignCall.lhs
index 5072908e6af3..232f69f67f61 100644
--- a/compiler/prelude/ForeignCall.lhs
+++ b/compiler/prelude/ForeignCall.lhs
@@ -117,7 +117,7 @@ data CCallTarget
= StaticTarget
CLabelString -- C-land name of label.
- (Maybe PackageId) -- What package the function is in.
+ (Maybe PackageKey) -- What package the function is in.
-- If Nothing, then it's taken to be in the current package.
-- Note: This information is only used for PrimCalls on Windows.
-- See CLabel.labelDynamic and CoreToStg.coreToStgApp
diff --git a/compiler/prelude/PrelInfo.lhs b/compiler/prelude/PrelInfo.lhs
index bfcea1c039e1..eaefff236406 100644
--- a/compiler/prelude/PrelInfo.lhs
+++ b/compiler/prelude/PrelInfo.lhs
@@ -4,13 +4,7 @@
\section[PrelInfo]{The @PrelInfo@ interface to the compiler's prelude knowledge}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
+{-# LANGUAGE CPP #-}
module PrelInfo (
wiredInIds, ghcPrimIds,
primOpRules, builtinRules,
@@ -18,7 +12,7 @@ module PrelInfo (
ghcPrimExports,
wiredInThings, basicKnownKeyNames,
primOpId,
-
+
-- Random other things
maybeCharLikeCon, maybeIntLikeCon,
@@ -48,9 +42,9 @@ import Data.Array
\end{code}
%************************************************************************
-%* *
+%* *
\subsection[builtinNameInfo]{Lookup built-in names}
-%* *
+%* *
%************************************************************************
Notes about wired in things
@@ -58,13 +52,13 @@ Notes about wired in things
* Wired-in things are Ids\/TyCons that are completely known to the compiler.
They are global values in GHC, (e.g. listTyCon :: TyCon).
-* A wired in Name contains the thing itself inside the Name:
- see Name.wiredInNameTyThing_maybe
- (E.g. listTyConName contains listTyCon.
+* A wired in Name contains the thing itself inside the Name:
+ see Name.wiredInNameTyThing_maybe
+ (E.g. listTyConName contains listTyCon.
* The name cache is initialised with (the names of) all wired-in things
-* The type checker sees if the Name is wired in before looking up
+* The type checker sees if the Name is wired in before looking up
the name in the type environment. So the type envt itself contains
no wired in things.
@@ -77,17 +71,17 @@ wiredInThings :: [TyThing]
-- This list is used only to initialise HscMain.knownKeyNames
-- to ensure that when you say "Prelude.map" in your source code, you
-- get a Name with the correct known key (See Note [Known-key names])
-wiredInThings
+wiredInThings
= concat
- [ -- Wired in TyCons and their implicit Ids
- tycon_things
- , concatMap implicitTyThings tycon_things
+ [ -- Wired in TyCons and their implicit Ids
+ tycon_things
+ , concatMap implicitTyThings tycon_things
- -- Wired in Ids
- , map AnId wiredInIds
+ -- Wired in Ids
+ , map AnId wiredInIds
- -- PrimOps
- , map (AnId . primOpId) allThePrimOps
+ -- PrimOps
+ , map (AnId . primOpId) allThePrimOps
]
where
tycon_things = map ATyCon ([funTyCon] ++ primTyCons ++ wiredInTyCons
@@ -99,16 +93,16 @@ sense of them in interface pragmas. It's cool, though they all have
"non-standard" names, so they won't get past the parser in user code.
%************************************************************************
-%* *
- PrimOpIds
-%* *
+%* *
+ PrimOpIds
+%* *
%************************************************************************
\begin{code}
-primOpIds :: Array Int Id
+primOpIds :: Array Int Id
-- A cache of the PrimOp Ids, indexed by PrimOp tag
-primOpIds = array (1,maxPrimOpTag) [ (primOpTag op, mkPrimOpId op)
- | op <- allThePrimOps ]
+primOpIds = array (1,maxPrimOpTag) [ (primOpTag op, mkPrimOpId op)
+ | op <- allThePrimOps ]
primOpId :: PrimOp -> Id
primOpId op = primOpIds ! primOpTag op
@@ -116,12 +110,12 @@ primOpId op = primOpIds ! primOpTag op
%************************************************************************
-%* *
+%* *
\subsection{Export lists for pseudo-modules (GHC.Prim)}
-%* *
+%* *
%************************************************************************
-GHC.Prim "exports" all the primops and primitive types, some
+GHC.Prim "exports" all the primops and primitive types, some
wired-in Ids.
\begin{code}
@@ -129,15 +123,16 @@ ghcPrimExports :: [IfaceExport]
ghcPrimExports
= map (Avail . idName) ghcPrimIds ++
map (Avail . idName . primOpId) allThePrimOps ++
- [ AvailTC n [n]
- | tc <- funTyCon : coercibleTyCon : primTyCons, let n = tyConName tc ]
+ [ AvailTC n [n]
+ | tc <- funTyCon : primTyCons, let n = tyConName tc ]
\end{code}
+
%************************************************************************
-%* *
+%* *
\subsection{Built-in keys}
-%* *
+%* *
%************************************************************************
ToDo: make it do the ``like'' part properly (as in 0.26 and before).
@@ -150,9 +145,9 @@ maybeIntLikeCon con = con `hasKey` intDataConKey
%************************************************************************
-%* *
+%* *
\subsection{Class predicates}
-%* *
+%* *
%************************************************************************
\begin{code}
diff --git a/compiler/prelude/PrelNames.lhs b/compiler/prelude/PrelNames.lhs
index 3f00c6242c7c..5757ba123428 100644
--- a/compiler/prelude/PrelNames.lhs
+++ b/compiler/prelude/PrelNames.lhs
@@ -37,56 +37,73 @@ Nota Bene: all Names defined in here should come from the base package
Note [Known-key names]
~~~~~~~~~~~~~~~~~~~~~~
+It is *very* important that the compiler gives wired-in things and
+things with "known-key" names the correct Uniques wherever they
+occur. We have to be careful about this in exactly two places:
-It is *very* important that the compiler gives wired-in things and things with "known-key" names
-the correct Uniques wherever they occur. We have to be careful about this in exactly two places:
+ 1. When we parse some source code, renaming the AST better yield an
+ AST whose Names have the correct uniques
- 1. When we parse some source code, renaming the AST better yield an AST whose Names have the
- correct uniques
-
- 2. When we read an interface file, the read-in gubbins better have the right uniques
+ 2. When we read an interface file, the read-in gubbins better have
+ the right uniques
This is accomplished through a combination of mechanisms:
- 1. When parsing source code, the RdrName-decorated AST has some RdrNames which are Exact. These are
- wired-in RdrNames where the we could directly tell from the parsed syntax what Name to use. For
- example, when we parse a [] in a type we can just insert an Exact RdrName Name with the listTyConKey.
-
- Currently, I believe this is just an optimisation: it would be equally valid to just output Orig
- RdrNames that correctly record the module etc we expect the final Name to come from. However,
- were we to eliminate isTupleOcc_maybe it would become essential (see point 3).
-
- 2. The knownKeyNames (which consist of the basicKnownKeyNames from the module, and those names reachable
- via the wired-in stuff from TysWiredIn) are used to initialise the "original name cache" in IfaceEnv.
- This initialization ensures that when the type checker or renamer (both of which use IfaceEnv) look up
- an original name (i.e. a pair of a Module and an OccName) for a known-key name they get the correct Unique.
+ 1. When parsing source code, the RdrName-decorated AST has some
+ RdrNames which are Exact. These are wired-in RdrNames where the
+ we could directly tell from the parsed syntax what Name to
+ use. For example, when we parse a [] in a type we can just insert
+ an Exact RdrName Name with the listTyConKey.
+
+ Currently, I believe this is just an optimisation: it would be
+ equally valid to just output Orig RdrNames that correctly record
+ the module etc we expect the final Name to come from. However,
+ were we to eliminate isBuiltInOcc_maybe it would become essential
+ (see point 3).
+
+ 2. The knownKeyNames (which consist of the basicKnownKeyNames from
+ the module, and those names reachable via the wired-in stuff from
+ TysWiredIn) are used to initialise the "OrigNameCache" in
+ IfaceEnv. This initialization ensures that when the type checker
+ or renamer (both of which use IfaceEnv) look up an original name
+ (i.e. a pair of a Module and an OccName) for a known-key name
+ they get the correct Unique.
+
+ This is the most important mechanism for ensuring that known-key
+ stuff gets the right Unique, and is why it is so important to
+ place your known-key names in the appropriate lists.
+
+ 3. For "infinite families" of known-key names (i.e. tuples), we have
+ to be extra careful. Because there are an infinite number of
+ these things, we cannot add them to the list of known-key names
+ used to initialise the OrigNameCache. Instead, we have to
+ rely on never having to look them up in that cache.
- This is the most important mechanism for ensuring that known-key stuff gets the right Unique, and is why
- it is so important to place your known-key names in the appropriate lists.
+ This is accomplished through a variety of mechanisms:
- 3. For "infinite families" of known-key names (i.e. tuples, Any tycons and implicit parameter TyCons), we
- have to be extra careful. Because there are an infinite number of these things, we cannot add them to
- the list of known-key names used to initialise the original name cache. Instead, we have to rely on
- never having to look them up in that cache.
+ a) The parser recognises them specially and generates an
+ Exact Name (hence not looked up in the orig-name cache)
- This is accomplished through a variety of mechanisms:
+ b) The known infinite families of names are specially
+ serialised by BinIface.putName, with that special treatment
+ detected when we read back to ensure that we get back to the
+ correct uniques.
- a) The known infinite families of names are specially serialised by BinIface.putName, with that special treatment
- detected when we read back to ensure that we get back to the correct uniques.
+ Most of the infinite families cannot occur in source code,
+ so mechanisms (a,b) sufficies to ensure that they always have
+ the right Unique. In particular, implicit param TyCon names,
+ constraint tuples and Any TyCons cannot be mentioned by the
+ user.
- b) Most of the infinite families cannot occur in source code, so mechanism a) sufficies to ensure that they
- always have the right Unique. In particular, implicit param TyCon names, constraint tuples and Any TyCons
- cannot be mentioned by the user.
+ c) IfaceEnv.lookupOrigNameCache uses isBuiltInOcc_maybe to map
+ built-in syntax directly onto the corresponding name, rather
+ than trying to find it in the original-name cache.
- c) Tuple TyCon/DataCon names have a special hack (isTupleOcc_maybe) that is used by the original name cache
- lookup routine to detect tuple names and give them the right Unique. You might think that this is unnecessary
- because tuple TyCon/DataCons are parsed as Exact RdrNames and *don't* appear as original names in interface files
- (because serialization gives them special treatment), so we will never look them up in the original name cache.
+ See also Note [Built-in syntax and the OrigNameCache]
- However, there is a subtle reason why this is not the case: if you use setRdrNameSpace on an Exact RdrName
- it may be turned into an Orig RdrName. So if the original name was an Exact tuple Name we might end up with
- an Orig instead, which *will* lead to an original name cache query.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module PrelNames (
Unique, Uniquable(..), hasKey, -- Re-exported for convenience
@@ -111,6 +128,19 @@ import FastString
\end{code}
+%************************************************************************
+%* *
+ allNameStrings
+%* *
+%************************************************************************
+
+\begin{code}
+allNameStrings :: [String]
+-- Infinite list of a,b,c...z, aa, ab, ac, ... etc
+allNameStrings = [ c:cs | cs <- "" : allNameStrings, c <- ['a'..'z'] ]
+\end{code}
+
+
%************************************************************************
%* *
\subsection{Local Names}
@@ -250,8 +280,6 @@ basicKnownKeyNames
concatName, filterName, mapName,
zipName, foldrName, buildName, augmentName, appendName,
- dollarName, -- The ($) apply function
-
-- FFI primitive types that are not wired-in.
stablePtrTyConName, ptrTyConName, funPtrTyConName,
int8TyConName, int16TyConName, int32TyConName, int64TyConName,
@@ -355,7 +383,7 @@ genericTyConNames = [
pRELUDE :: Module
pRELUDE = mkBaseModule_ pRELUDE_NAME
-gHC_PRIM, gHC_TYPES, gHC_GENERICS, gHC_MAGIC, gHC_COERCIBLE,
+gHC_PRIM, gHC_TYPES, gHC_GENERICS, gHC_MAGIC,
gHC_CLASSES, gHC_BASE, gHC_ENUM, gHC_GHCI, gHC_CSTRING,
gHC_SHOW, gHC_READ, gHC_NUM, gHC_INTEGER_TYPE, gHC_LIST,
gHC_TUPLE, dATA_TUPLE, dATA_EITHER, dATA_STRING, dATA_FOLDABLE, dATA_TRAVERSABLE, dATA_MONOID,
@@ -372,7 +400,6 @@ gHC_TYPES = mkPrimModule (fsLit "GHC.Types")
gHC_MAGIC = mkPrimModule (fsLit "GHC.Magic")
gHC_CSTRING = mkPrimModule (fsLit "GHC.CString")
gHC_CLASSES = mkPrimModule (fsLit "GHC.Classes")
-gHC_COERCIBLE = mkPrimModule (fsLit "GHC.Coercible")
gHC_BASE = mkBaseModule (fsLit "GHC.Base")
gHC_ENUM = mkBaseModule (fsLit "GHC.Enum")
@@ -434,7 +461,7 @@ rOOT_MAIN = mkMainModule (fsLit ":Main") -- Root module for initialisation
mkInteractiveModule :: Int -> Module
-- (mkInteractiveMoudule 9) makes module 'interactive:M9'
-mkInteractiveModule n = mkModule interactivePackageId (mkModuleName ("Ghci" ++ show n))
+mkInteractiveModule n = mkModule interactivePackageKey (mkModuleName ("Ghci" ++ show n))
pRELUDE_NAME, mAIN_NAME :: ModuleName
pRELUDE_NAME = mkModuleNameFS (fsLit "Prelude")
@@ -445,28 +472,28 @@ dATA_ARRAY_PARALLEL_NAME = mkModuleNameFS (fsLit "Data.Array.Parallel")
dATA_ARRAY_PARALLEL_PRIM_NAME = mkModuleNameFS (fsLit "Data.Array.Parallel.Prim")
mkPrimModule :: FastString -> Module
-mkPrimModule m = mkModule primPackageId (mkModuleNameFS m)
+mkPrimModule m = mkModule primPackageKey (mkModuleNameFS m)
mkIntegerModule :: FastString -> Module
-mkIntegerModule m = mkModule integerPackageId (mkModuleNameFS m)
+mkIntegerModule m = mkModule integerPackageKey (mkModuleNameFS m)
mkBaseModule :: FastString -> Module
-mkBaseModule m = mkModule basePackageId (mkModuleNameFS m)
+mkBaseModule m = mkModule basePackageKey (mkModuleNameFS m)
mkBaseModule_ :: ModuleName -> Module
-mkBaseModule_ m = mkModule basePackageId m
+mkBaseModule_ m = mkModule basePackageKey m
mkThisGhcModule :: FastString -> Module
-mkThisGhcModule m = mkModule thisGhcPackageId (mkModuleNameFS m)
+mkThisGhcModule m = mkModule thisGhcPackageKey (mkModuleNameFS m)
mkThisGhcModule_ :: ModuleName -> Module
-mkThisGhcModule_ m = mkModule thisGhcPackageId m
+mkThisGhcModule_ m = mkModule thisGhcPackageKey m
mkMainModule :: FastString -> Module
-mkMainModule m = mkModule mainPackageId (mkModuleNameFS m)
+mkMainModule m = mkModule mainPackageKey (mkModuleNameFS m)
mkMainModule_ :: ModuleName -> Module
-mkMainModule_ m = mkModule mainPackageId m
+mkMainModule_ m = mkModule mainPackageKey m
\end{code}
%************************************************************************
@@ -476,10 +503,10 @@ mkMainModule_ m = mkModule mainPackageId m
%************************************************************************
\begin{code}
-mkTupleModule :: TupleSort -> Arity -> Module
-mkTupleModule BoxedTuple _ = gHC_TUPLE
-mkTupleModule ConstraintTuple _ = gHC_TUPLE
-mkTupleModule UnboxedTuple _ = gHC_PRIM
+mkTupleModule :: TupleSort -> Module
+mkTupleModule BoxedTuple = gHC_TUPLE
+mkTupleModule ConstraintTuple = gHC_TUPLE
+mkTupleModule UnboxedTuple = gHC_PRIM
\end{code}
@@ -805,20 +832,20 @@ inlineIdName = varQual gHC_MAGIC (fsLit "inline") inlineIdKey
-- Base classes (Eq, Ord, Functor)
fmapName, eqClassName, eqName, ordClassName, geName, functorClassName :: Name
-eqClassName = clsQual gHC_CLASSES (fsLit "Eq") eqClassKey
-eqName = methName gHC_CLASSES (fsLit "==") eqClassOpKey
-ordClassName = clsQual gHC_CLASSES (fsLit "Ord") ordClassKey
-geName = methName gHC_CLASSES (fsLit ">=") geClassOpKey
-functorClassName = clsQual gHC_BASE (fsLit "Functor") functorClassKey
-fmapName = methName gHC_BASE (fsLit "fmap") fmapClassOpKey
+eqClassName = clsQual gHC_CLASSES (fsLit "Eq") eqClassKey
+eqName = varQual gHC_CLASSES (fsLit "==") eqClassOpKey
+ordClassName = clsQual gHC_CLASSES (fsLit "Ord") ordClassKey
+geName = varQual gHC_CLASSES (fsLit ">=") geClassOpKey
+functorClassName = clsQual gHC_BASE (fsLit "Functor") functorClassKey
+fmapName = varQual gHC_BASE (fsLit "fmap") fmapClassOpKey
-- Class Monad
monadClassName, thenMName, bindMName, returnMName, failMName :: Name
-monadClassName = clsQual gHC_BASE (fsLit "Monad") monadClassKey
-thenMName = methName gHC_BASE (fsLit ">>") thenMClassOpKey
-bindMName = methName gHC_BASE (fsLit ">>=") bindMClassOpKey
-returnMName = methName gHC_BASE (fsLit "return") returnMClassOpKey
-failMName = methName gHC_BASE (fsLit "fail") failMClassOpKey
+monadClassName = clsQual gHC_BASE (fsLit "Monad") monadClassKey
+thenMName = varQual gHC_BASE (fsLit ">>") thenMClassOpKey
+bindMName = varQual gHC_BASE (fsLit ">>=") bindMClassOpKey
+returnMName = varQual gHC_BASE (fsLit "return") returnMClassOpKey
+failMName = varQual gHC_BASE (fsLit "fail") failMClassOpKey
-- Classes (Applicative, Foldable, Traversable)
applicativeClassName, foldableClassName, traversableClassName :: Name
@@ -831,10 +858,10 @@ traversableClassName = clsQual dATA_TRAVERSABLE (fsLit "Traversable") trave
-- AMP additions
joinMName, apAName, pureAName, alternativeClassName :: Name
-joinMName = methName mONAD (fsLit "join") joinMIdKey
-apAName = methName cONTROL_APPLICATIVE (fsLit "<*>") apAClassOpKey
-pureAName = methName cONTROL_APPLICATIVE (fsLit "pure") pureAClassOpKey
-alternativeClassName = clsQual cONTROL_APPLICATIVE (fsLit "Alternative") alternativeClassKey
+joinMName = varQual mONAD (fsLit "join") joinMIdKey
+apAName = varQual cONTROL_APPLICATIVE (fsLit "<*>") apAClassOpKey
+pureAName = varQual cONTROL_APPLICATIVE (fsLit "pure") pureAClassOpKey
+alternativeClassName = clsQual cONTROL_APPLICATIVE (fsLit "Alternative") alternativeClassKey
joinMIdKey, apAClassOpKey, pureAClassOpKey, alternativeClassKey :: Unique
joinMIdKey = mkPreludeMiscIdUnique 750
@@ -851,20 +878,19 @@ groupWithName = varQual gHC_EXTS (fsLit "groupWith") groupWithIdKey
fromStringName, otherwiseIdName, foldrName, buildName, augmentName,
mapName, appendName, assertName,
breakpointName, breakpointCondName, breakpointAutoName,
- dollarName, opaqueTyConName :: Name
-fromStringName = methName dATA_STRING (fsLit "fromString") fromStringClassOpKey
+ opaqueTyConName :: Name
+fromStringName = varQual dATA_STRING (fsLit "fromString") fromStringClassOpKey
otherwiseIdName = varQual gHC_BASE (fsLit "otherwise") otherwiseIdKey
foldrName = varQual gHC_BASE (fsLit "foldr") foldrIdKey
buildName = varQual gHC_BASE (fsLit "build") buildIdKey
augmentName = varQual gHC_BASE (fsLit "augment") augmentIdKey
mapName = varQual gHC_BASE (fsLit "map") mapIdKey
appendName = varQual gHC_BASE (fsLit "++") appendIdKey
-dollarName = varQual gHC_BASE (fsLit "$") dollarIdKey
assertName = varQual gHC_BASE (fsLit "assert") assertIdKey
breakpointName = varQual gHC_BASE (fsLit "breakpoint") breakpointIdKey
breakpointCondName= varQual gHC_BASE (fsLit "breakpointCond") breakpointCondIdKey
breakpointAutoName= varQual gHC_BASE (fsLit "breakpointAuto") breakpointAutoIdKey
-opaqueTyConName = tcQual gHC_BASE (fsLit "Opaque") opaqueTyConKey
+opaqueTyConName = tcQual gHC_BASE (fsLit "Opaque") opaqueTyConKey
breakpointJumpName :: Name
breakpointJumpName
@@ -892,10 +918,10 @@ sndName = varQual dATA_TUPLE (fsLit "snd") sndIdKey
-- Module GHC.Num
numClassName, fromIntegerName, minusName, negateName :: Name
-numClassName = clsQual gHC_NUM (fsLit "Num") numClassKey
-fromIntegerName = methName gHC_NUM (fsLit "fromInteger") fromIntegerClassOpKey
-minusName = methName gHC_NUM (fsLit "-") minusClassOpKey
-negateName = methName gHC_NUM (fsLit "negate") negateClassOpKey
+numClassName = clsQual gHC_NUM (fsLit "Num") numClassKey
+fromIntegerName = varQual gHC_NUM (fsLit "fromInteger") fromIntegerClassOpKey
+minusName = varQual gHC_NUM (fsLit "-") minusClassOpKey
+negateName = varQual gHC_NUM (fsLit "negate") negateClassOpKey
integerTyConName, mkIntegerName,
integerToWord64Name, integerToInt64Name,
@@ -962,23 +988,23 @@ rationalTyConName, ratioTyConName, ratioDataConName, realClassName,
integralClassName, realFracClassName, fractionalClassName,
fromRationalName, toIntegerName, toRationalName, fromIntegralName,
realToFracName :: Name
-rationalTyConName = tcQual gHC_REAL (fsLit "Rational") rationalTyConKey
-ratioTyConName = tcQual gHC_REAL (fsLit "Ratio") ratioTyConKey
-ratioDataConName = conName gHC_REAL (fsLit ":%") ratioDataConKey
-realClassName = clsQual gHC_REAL (fsLit "Real") realClassKey
-integralClassName = clsQual gHC_REAL (fsLit "Integral") integralClassKey
-realFracClassName = clsQual gHC_REAL (fsLit "RealFrac") realFracClassKey
-fractionalClassName = clsQual gHC_REAL (fsLit "Fractional") fractionalClassKey
-fromRationalName = methName gHC_REAL (fsLit "fromRational") fromRationalClassOpKey
-toIntegerName = methName gHC_REAL (fsLit "toInteger") toIntegerClassOpKey
-toRationalName = methName gHC_REAL (fsLit "toRational") toRationalClassOpKey
-fromIntegralName = varQual gHC_REAL (fsLit "fromIntegral") fromIntegralIdKey
-realToFracName = varQual gHC_REAL (fsLit "realToFrac") realToFracIdKey
+rationalTyConName = tcQual gHC_REAL (fsLit "Rational") rationalTyConKey
+ratioTyConName = tcQual gHC_REAL (fsLit "Ratio") ratioTyConKey
+ratioDataConName = conName gHC_REAL (fsLit ":%") ratioDataConKey
+realClassName = clsQual gHC_REAL (fsLit "Real") realClassKey
+integralClassName = clsQual gHC_REAL (fsLit "Integral") integralClassKey
+realFracClassName = clsQual gHC_REAL (fsLit "RealFrac") realFracClassKey
+fractionalClassName = clsQual gHC_REAL (fsLit "Fractional") fractionalClassKey
+fromRationalName = varQual gHC_REAL (fsLit "fromRational") fromRationalClassOpKey
+toIntegerName = varQual gHC_REAL (fsLit "toInteger") toIntegerClassOpKey
+toRationalName = varQual gHC_REAL (fsLit "toRational") toRationalClassOpKey
+fromIntegralName = varQual gHC_REAL (fsLit "fromIntegral")fromIntegralIdKey
+realToFracName = varQual gHC_REAL (fsLit "realToFrac") realToFracIdKey
-- PrelFloat classes
floatingClassName, realFloatClassName :: Name
-floatingClassName = clsQual gHC_FLOAT (fsLit "Floating") floatingClassKey
-realFloatClassName = clsQual gHC_FLOAT (fsLit "RealFloat") realFloatClassKey
+floatingClassName = clsQual gHC_FLOAT (fsLit "Floating") floatingClassKey
+realFloatClassName = clsQual gHC_FLOAT (fsLit "RealFloat") realFloatClassKey
-- other GHC.Float functions
rationalToFloatName, rationalToDoubleName :: Name
@@ -994,7 +1020,7 @@ typeableClassName,
oldTypeableClassName, oldTypeable1ClassName, oldTypeable2ClassName,
oldTypeable3ClassName, oldTypeable4ClassName, oldTypeable5ClassName,
oldTypeable6ClassName, oldTypeable7ClassName :: Name
-typeableClassName = clsQual tYPEABLE_INTERNAL (fsLit "Typeable") typeableClassKey
+typeableClassName = clsQual tYPEABLE_INTERNAL (fsLit "Typeable") typeableClassKey
oldTypeableClassName = clsQual oLDTYPEABLE_INTERNAL (fsLit "Typeable") oldTypeableClassKey
oldTypeable1ClassName = clsQual oLDTYPEABLE_INTERNAL (fsLit "Typeable1") oldTypeable1ClassKey
oldTypeable2ClassName = clsQual oLDTYPEABLE_INTERNAL (fsLit "Typeable2") oldTypeable2ClassKey
@@ -1020,33 +1046,33 @@ assertErrorName = varQual gHC_IO_Exception (fsLit "assertError") assertErrorId
-- Enum module (Enum, Bounded)
enumClassName, enumFromName, enumFromToName, enumFromThenName,
enumFromThenToName, boundedClassName :: Name
-enumClassName = clsQual gHC_ENUM (fsLit "Enum") enumClassKey
-enumFromName = methName gHC_ENUM (fsLit "enumFrom") enumFromClassOpKey
-enumFromToName = methName gHC_ENUM (fsLit "enumFromTo") enumFromToClassOpKey
-enumFromThenName = methName gHC_ENUM (fsLit "enumFromThen") enumFromThenClassOpKey
-enumFromThenToName = methName gHC_ENUM (fsLit "enumFromThenTo") enumFromThenToClassOpKey
-boundedClassName = clsQual gHC_ENUM (fsLit "Bounded") boundedClassKey
+enumClassName = clsQual gHC_ENUM (fsLit "Enum") enumClassKey
+enumFromName = varQual gHC_ENUM (fsLit "enumFrom") enumFromClassOpKey
+enumFromToName = varQual gHC_ENUM (fsLit "enumFromTo") enumFromToClassOpKey
+enumFromThenName = varQual gHC_ENUM (fsLit "enumFromThen") enumFromThenClassOpKey
+enumFromThenToName = varQual gHC_ENUM (fsLit "enumFromThenTo") enumFromThenToClassOpKey
+boundedClassName = clsQual gHC_ENUM (fsLit "Bounded") boundedClassKey
-- List functions
concatName, filterName, zipName :: Name
concatName = varQual gHC_LIST (fsLit "concat") concatIdKey
filterName = varQual gHC_LIST (fsLit "filter") filterIdKey
-zipName = varQual gHC_LIST (fsLit "zip") zipIdKey
+zipName = varQual gHC_LIST (fsLit "zip") zipIdKey
-- Overloaded lists
isListClassName, fromListName, fromListNName, toListName :: Name
-isListClassName = clsQual gHC_EXTS (fsLit "IsList") isListClassKey
-fromListName = methName gHC_EXTS (fsLit "fromList") fromListClassOpKey
-fromListNName = methName gHC_EXTS (fsLit "fromListN") fromListNClassOpKey
-toListName = methName gHC_EXTS (fsLit "toList") toListClassOpKey
+isListClassName = clsQual gHC_EXTS (fsLit "IsList") isListClassKey
+fromListName = varQual gHC_EXTS (fsLit "fromList") fromListClassOpKey
+fromListNName = varQual gHC_EXTS (fsLit "fromListN") fromListNClassOpKey
+toListName = varQual gHC_EXTS (fsLit "toList") toListClassOpKey
-- Class Show
showClassName :: Name
-showClassName = clsQual gHC_SHOW (fsLit "Show") showClassKey
+showClassName = clsQual gHC_SHOW (fsLit "Show") showClassKey
-- Class Read
readClassName :: Name
-readClassName = clsQual gHC_READ (fsLit "Read") readClassKey
+readClassName = clsQual gHC_READ (fsLit "Read") readClassKey
-- Classes Generic and Generic1, Datatype, Constructor and Selector
genClassName, gen1ClassName, datatypeClassName, constructorClassName,
@@ -1054,24 +1080,27 @@ genClassName, gen1ClassName, datatypeClassName, constructorClassName,
genClassName = clsQual gHC_GENERICS (fsLit "Generic") genClassKey
gen1ClassName = clsQual gHC_GENERICS (fsLit "Generic1") gen1ClassKey
-datatypeClassName = clsQual gHC_GENERICS (fsLit "Datatype") datatypeClassKey
+datatypeClassName = clsQual gHC_GENERICS (fsLit "Datatype") datatypeClassKey
constructorClassName = clsQual gHC_GENERICS (fsLit "Constructor") constructorClassKey
-selectorClassName = clsQual gHC_GENERICS (fsLit "Selector") selectorClassKey
+selectorClassName = clsQual gHC_GENERICS (fsLit "Selector") selectorClassKey
+
+genericClassNames :: [Name]
+genericClassNames = [genClassName, gen1ClassName]
-- GHCi things
ghciIoClassName, ghciStepIoMName :: Name
ghciIoClassName = clsQual gHC_GHCI (fsLit "GHCiSandboxIO") ghciIoClassKey
-ghciStepIoMName = methName gHC_GHCI (fsLit "ghciStepIO") ghciStepIoMClassOpKey
+ghciStepIoMName = varQual gHC_GHCI (fsLit "ghciStepIO") ghciStepIoMClassOpKey
-- IO things
ioTyConName, ioDataConName, thenIOName, bindIOName, returnIOName,
failIOName :: Name
-ioTyConName = tcQual gHC_TYPES (fsLit "IO") ioTyConKey
-ioDataConName = conName gHC_TYPES (fsLit "IO") ioDataConKey
-thenIOName = varQual gHC_BASE (fsLit "thenIO") thenIOIdKey
-bindIOName = varQual gHC_BASE (fsLit "bindIO") bindIOIdKey
-returnIOName = varQual gHC_BASE (fsLit "returnIO") returnIOIdKey
-failIOName = varQual gHC_IO (fsLit "failIO") failIOIdKey
+ioTyConName = tcQual gHC_TYPES (fsLit "IO") ioTyConKey
+ioDataConName = conName gHC_TYPES (fsLit "IO") ioDataConKey
+thenIOName = varQual gHC_BASE (fsLit "thenIO") thenIOIdKey
+bindIOName = varQual gHC_BASE (fsLit "bindIO") bindIOIdKey
+returnIOName = varQual gHC_BASE (fsLit "returnIO") returnIOIdKey
+failIOName = varQual gHC_IO (fsLit "failIO") failIOIdKey
-- IO things
printName :: Name
@@ -1079,7 +1108,7 @@ printName = varQual sYSTEM_IO (fsLit "print") printIdKey
-- Int, Word, and Addr things
int8TyConName, int16TyConName, int32TyConName, int64TyConName :: Name
-int8TyConName = tcQual gHC_INT (fsLit "Int8") int8TyConKey
+int8TyConName = tcQual gHC_INT (fsLit "Int8") int8TyConKey
int16TyConName = tcQual gHC_INT (fsLit "Int16") int16TyConKey
int32TyConName = tcQual gHC_INT (fsLit "Int32") int32TyConKey
int64TyConName = tcQual gHC_INT (fsLit "Int64") int64TyConKey
@@ -1093,12 +1122,12 @@ word64TyConName = tcQual gHC_WORD (fsLit "Word64") word64TyConKey
-- PrelPtr module
ptrTyConName, funPtrTyConName :: Name
-ptrTyConName = tcQual gHC_PTR (fsLit "Ptr") ptrTyConKey
+ptrTyConName = tcQual gHC_PTR (fsLit "Ptr") ptrTyConKey
funPtrTyConName = tcQual gHC_PTR (fsLit "FunPtr") funPtrTyConKey
-- Foreign objects and weak pointers
stablePtrTyConName, newStablePtrName :: Name
-stablePtrTyConName = tcQual gHC_STABLE (fsLit "StablePtr") stablePtrTyConKey
+stablePtrTyConName = tcQual gHC_STABLE (fsLit "StablePtr") stablePtrTyConKey
newStablePtrName = varQual gHC_STABLE (fsLit "newStablePtr") newStablePtrIdKey
-- PrelST module
@@ -1108,21 +1137,21 @@ runSTRepName = varQual gHC_ST (fsLit "runSTRep") runSTRepIdKey
-- Recursive-do notation
monadFixClassName, mfixName :: Name
monadFixClassName = clsQual mONAD_FIX (fsLit "MonadFix") monadFixClassKey
-mfixName = methName mONAD_FIX (fsLit "mfix") mfixIdKey
+mfixName = varQual mONAD_FIX (fsLit "mfix") mfixIdKey
-- Arrow notation
arrAName, composeAName, firstAName, appAName, choiceAName, loopAName :: Name
-arrAName = varQual aRROW (fsLit "arr") arrAIdKey
+arrAName = varQual aRROW (fsLit "arr") arrAIdKey
composeAName = varQual gHC_DESUGAR (fsLit ">>>") composeAIdKey
-firstAName = varQual aRROW (fsLit "first") firstAIdKey
-appAName = varQual aRROW (fsLit "app") appAIdKey
-choiceAName = varQual aRROW (fsLit "|||") choiceAIdKey
-loopAName = varQual aRROW (fsLit "loop") loopAIdKey
+firstAName = varQual aRROW (fsLit "first") firstAIdKey
+appAName = varQual aRROW (fsLit "app") appAIdKey
+choiceAName = varQual aRROW (fsLit "|||") choiceAIdKey
+loopAName = varQual aRROW (fsLit "loop") loopAIdKey
-- Monad comprehensions
guardMName, liftMName, mzipName :: Name
-guardMName = varQual mONAD (fsLit "guard") guardMIdKey
-liftMName = varQual mONAD (fsLit "liftM") liftMIdKey
+guardMName = varQual mONAD (fsLit "guard") guardMIdKey
+liftMName = varQual mONAD (fsLit "liftM") liftMIdKey
mzipName = varQual mONAD_ZIP (fsLit "mzip") mzipIdKey
@@ -1133,9 +1162,9 @@ toAnnotationWrapperName = varQual gHC_DESUGAR (fsLit "toAnnotationWrapper") toAn
-- Other classes, needed for type defaulting
monadPlusClassName, randomClassName, randomGenClassName,
isStringClassName :: Name
-monadPlusClassName = clsQual mONAD (fsLit "MonadPlus") monadPlusClassKey
-randomClassName = clsQual rANDOM (fsLit "Random") randomClassKey
-randomGenClassName = clsQual rANDOM (fsLit "RandomGen") randomGenClassKey
+monadPlusClassName = clsQual mONAD (fsLit "MonadPlus") monadPlusClassKey
+randomClassName = clsQual rANDOM (fsLit "Random") randomClassKey
+randomGenClassName = clsQual rANDOM (fsLit "RandomGen") randomGenClassKey
isStringClassName = clsQual dATA_STRING (fsLit "IsString") isStringClassKey
-- Type-level naturals
@@ -1191,10 +1220,6 @@ mk_known_key_name space modu str unique
conName :: Module -> FastString -> Unique -> Name
conName modu occ unique
= mkExternalName unique modu (mkOccNameFS dataName occ) noSrcSpan
-
-methName :: Module -> FastString -> Unique -> Name
-methName modu occ unique
- = mkExternalName unique modu (mkVarOccFS occ) noSrcSpan
\end{code}
%************************************************************************
@@ -1303,12 +1328,13 @@ addrPrimTyConKey, arrayPrimTyConKey, arrayArrayPrimTyConKey, boolTyConKey, byteA
floatPrimTyConKey, floatTyConKey, funTyConKey, intPrimTyConKey,
intTyConKey, int8TyConKey, int16TyConKey, int32PrimTyConKey,
int32TyConKey, int64PrimTyConKey, int64TyConKey,
- integerTyConKey, digitsTyConKey,
+ integerTyConKey,
listTyConKey, foreignObjPrimTyConKey, weakPrimTyConKey,
mutableArrayPrimTyConKey, mutableArrayArrayPrimTyConKey, mutableByteArrayPrimTyConKey,
orderingTyConKey, mVarPrimTyConKey, ratioTyConKey, rationalTyConKey,
realWorldTyConKey, stablePtrPrimTyConKey, stablePtrTyConKey,
- anyTyConKey, eqTyConKey :: Unique
+ anyTyConKey, eqTyConKey, smallArrayPrimTyConKey,
+ smallMutableArrayPrimTyConKey :: Unique
addrPrimTyConKey = mkPreludeTyConUnique 1
arrayPrimTyConKey = mkPreludeTyConUnique 3
boolTyConKey = mkPreludeTyConUnique 4
@@ -1329,7 +1355,7 @@ int32TyConKey = mkPreludeTyConUnique 19
int64PrimTyConKey = mkPreludeTyConUnique 20
int64TyConKey = mkPreludeTyConUnique 21
integerTyConKey = mkPreludeTyConUnique 22
-digitsTyConKey = mkPreludeTyConUnique 23
+
listTyConKey = mkPreludeTyConUnique 24
foreignObjPrimTyConKey = mkPreludeTyConUnique 25
weakPrimTyConKey = mkPreludeTyConUnique 27
@@ -1475,6 +1501,7 @@ rep1TyConKey = mkPreludeTyConUnique 156
typeNatKindConNameKey, typeSymbolKindConNameKey,
typeNatAddTyFamNameKey, typeNatMulTyFamNameKey, typeNatExpTyFamNameKey,
typeNatLeqTyFamNameKey, typeNatSubTyFamNameKey
+ , typeSymbolCmpTyFamNameKey, typeNatCmpTyFamNameKey
:: Unique
typeNatKindConNameKey = mkPreludeTyConUnique 160
typeSymbolKindConNameKey = mkPreludeTyConUnique 161
@@ -1483,6 +1510,8 @@ typeNatMulTyFamNameKey = mkPreludeTyConUnique 163
typeNatExpTyFamNameKey = mkPreludeTyConUnique 164
typeNatLeqTyFamNameKey = mkPreludeTyConUnique 165
typeNatSubTyFamNameKey = mkPreludeTyConUnique 166
+typeSymbolCmpTyFamNameKey = mkPreludeTyConUnique 167
+typeNatCmpTyFamNameKey = mkPreludeTyConUnique 168
ntTyConKey:: Unique
ntTyConKey = mkPreludeTyConUnique 174
@@ -1495,6 +1524,9 @@ proxyPrimTyConKey = mkPreludeTyConUnique 176
specTyConKey :: Unique
specTyConKey = mkPreludeTyConUnique 177
+smallArrayPrimTyConKey = mkPreludeTyConUnique 178
+smallMutableArrayPrimTyConKey = mkPreludeTyConUnique 179
+
---------------- Template Haskell -------------------
-- USES TyConUniques 200-299
-----------------------------------------------------
diff --git a/compiler/prelude/PrelRules.lhs b/compiler/prelude/PrelRules.lhs
index 11367edfecd2..d2e648f38247 100644
--- a/compiler/prelude/PrelRules.lhs
+++ b/compiler/prelude/PrelRules.lhs
@@ -12,8 +12,8 @@ ToDo:
(i1 + i2) only if it results in a valid Float.
\begin{code}
-{-# LANGUAGE RankNTypes #-}
-{-# OPTIONS -optc-DNON_POSIX_SOURCE #-}
+{-# LANGUAGE CPP, RankNTypes #-}
+{-# OPTIONS_GHC -optc-DNON_POSIX_SOURCE #-}
module PrelRules ( primOpRules, builtinRules ) where
@@ -111,6 +111,8 @@ primOpRules nm OrIOp = mkPrimOpRule nm 2 [ binaryLit (intOp2 (.|.))
primOpRules nm XorIOp = mkPrimOpRule nm 2 [ binaryLit (intOp2 xor)
, identityDynFlags zeroi
, equalArgs >> retLit zeroi ]
+primOpRules nm NotIOp = mkPrimOpRule nm 1 [ unaryLit complementOp
+ , inversePrimOp NotIOp ]
primOpRules nm IntNegOp = mkPrimOpRule nm 1 [ unaryLit negOp
, inversePrimOp IntNegOp ]
primOpRules nm ISllOp = mkPrimOpRule nm 2 [ binaryLit (intOp2 Bits.shiftL)
@@ -141,6 +143,8 @@ primOpRules nm OrOp = mkPrimOpRule nm 2 [ binaryLit (wordOp2 (.|.))
primOpRules nm XorOp = mkPrimOpRule nm 2 [ binaryLit (wordOp2 xor)
, identityDynFlags zerow
, equalArgs >> retLit zerow ]
+primOpRules nm NotOp = mkPrimOpRule nm 1 [ unaryLit complementOp
+ , inversePrimOp NotOp ]
primOpRules nm SllOp = mkPrimOpRule nm 2 [ wordShiftRule Bits.shiftL ]
primOpRules nm SrlOp = mkPrimOpRule nm 2 [ wordShiftRule shiftRightLogical ]
@@ -345,6 +349,11 @@ negOp dflags (MachDouble d) = Just (mkDoubleVal dflags (-d))
negOp dflags (MachInt i) = intResult dflags (-i)
negOp _ _ = Nothing
+complementOp :: DynFlags -> Literal -> Maybe CoreExpr -- Binary complement
+complementOp dflags (MachWord i) = wordResult dflags (complement i)
+complementOp dflags (MachInt i) = intResult dflags (complement i)
+complementOp _ _ = Nothing
+
--------------------------
intOp2 :: (Integral a, Integral b)
=> (a -> b -> Integer)
diff --git a/compiler/prelude/PrimOp.lhs b/compiler/prelude/PrimOp.lhs
index 12f71c22303d..198078bc9f4e 100644
--- a/compiler/prelude/PrimOp.lhs
+++ b/compiler/prelude/PrimOp.lhs
@@ -4,6 +4,8 @@
\section[PrimOp]{Primitive operations (machine-level)}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module PrimOp (
PrimOp(..), PrimOpVecCat(..), allThePrimOps,
primOpType, primOpSig,
@@ -38,7 +40,7 @@ import Unique ( Unique, mkPrimOpIdUnique )
import Outputable
import FastTypes
import FastString
-import Module ( PackageId )
+import Module ( PackageKey )
\end{code}
%************************************************************************
@@ -327,27 +329,89 @@ Note [PrimOp can_fail and has_side_effects]
Both can_fail and has_side_effects mean that the primop has
some effect that is not captured entirely by its result value.
- ---------- has_side_effects ---------------------
- Has some imperative side effect, perhaps on the world (I/O),
- or perhaps on some mutable data structure (writeIORef).
- Generally speaking all such primops have a type like
- State -> input -> (State, output)
- so the state token guarantees ordering, and also ensures
- that the primop is executed even if 'output' is discarded.
-
- ---------- can_fail ----------------------------
- Can fail with a seg-fault or divide-by-zero error on some elements
- of its input domain. Main examples:
- division (fails on zero demoninator
- array indexing (fails if the index is out of bounds)
- However (ASSUMPTION), these can_fail primops are ALWAYS surrounded
- with a test that checks for the bad cases.
-
-Consequences:
-
-* You can discard a can_fail primop, or float it _inwards_.
- But you cannot float it _outwards_, lest you escape the
- dynamic scope of the test. Example:
+---------- has_side_effects ---------------------
+A primop "has_side_effects" if it has some *write* effect, visible
+elsewhere
+ - writing to the world (I/O)
+ - writing to a mutable data structure (writeIORef)
+ - throwing a synchronous Haskell exception
+
+Often such primops have a type like
+ State -> input -> (State, output)
+so the state token guarantees ordering. In general we rely *only* on
+data dependencies of the state token to enforce write-effect ordering
+
+ * NB1: if you inline unsafePerformIO, you may end up with
+ side-effecting ops whose 'state' output is discarded.
+ And programmers may do that by hand; see Trac #9390.
+ That is why we (conservatively) do not discard write-effecting
+ primops even if both their state and result is discarded.
+
+ * NB2: We consider primops, such as raiseIO#, that can raise a
+ (Haskell) synchronous exception to "have_side_effects" but not
+ "can_fail". We must be careful about not discarding such things;
+ see the paper "A semantics for imprecise exceptions".
+
+ * NB3: *Read* effects (like reading an IORef) don't count here,
+ because it doesn't matter if we don't do them, or do them more than
+ once. *Sequencing* is maintained by the data dependency of the state
+ token.
+
+---------- can_fail ----------------------------
+A primop "can_fail" if it can fail with an *unchecked* exception on
+some elements of its input domain. Main examples:
+ division (fails on zero demoninator)
+ array indexing (fails if the index is out of bounds)
+
+An "unchecked exception" is one that is an outright error, (not
+turned into a Haskell exception,) such as seg-fault or
+divide-by-zero error. Such can_fail primops are ALWAYS surrounded
+with a test that checks for the bad cases, but we need to be
+very careful about code motion that might move it out of
+the scope of the test.
+
+Note [Transformations affected by can_fail and has_side_effects]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The can_fail and has_side_effects properties have the following effect
+on program transformations. Summary table is followed by details.
+
+ can_fail has_side_effects
+Discard NO NO
+Float in YES YES
+Float out NO NO
+Duplicate YES NO
+
+* Discarding. case (a `op` b) of _ -> rhs ===> rhs
+ You should not discard a has_side_effects primop; e.g.
+ case (writeIntArray# a i v s of (# _, _ #) -> True
+ Arguably you should be able to discard this, since the
+ returned stat token is not used, but that relies on NEVER
+ inlining unsafePerformIO, and programmers sometimes write
+ this kind of stuff by hand (Trac #9390). So we (conservatively)
+ never discard a has_side_effects primop.
+
+ However, it's fine to discard a can_fail primop. For example
+ case (indexIntArray# a i) of _ -> True
+ We can discard indexIntArray#; it has can_fail, but not
+ has_side_effects; see Trac #5658 which was all about this.
+ Notice that indexIntArray# is (in a more general handling of
+ effects) read effect, but we don't care about that here, and
+ treat read effects as *not* has_side_effects.
+
+ Similarly (a `/#` b) can be discarded. It can seg-fault or
+ cause a hardware exception, but not a synchronous Haskell
+ exception.
+
+
+
+ Synchronous Haskell exceptions, e.g. from raiseIO#, are treated
+ as has_side_effects and hence are not discarded.
+
+* Float in. You can float a can_fail or has_side_effects primop
+ *inwards*, but not inside a lambda (see Duplication below).
+
+* Float out. You must not float a can_fail primop *outwards* lest
+ you escape the dynamic scope of the test. Example:
case d ># 0# of
True -> case x /# d of r -> r +# 1
False -> 0
@@ -357,25 +421,21 @@ Consequences:
True -> r +# 1
False -> 0
-* I believe that exactly the same rules apply to a has_side_effects
- primop; you can discard it (remember, the state token will keep
- it alive if necessary), or float it in, but not float it out.
-
- Example of the latter
- if blah then let! s1 = writeMutVar s0 v True in s1
+ Nor can you float out a has_side_effects primop. For example:
+ if blah then case writeMutVar# v True s0 of (# s1 #) -> s1
else s0
- Notice that s0 is mentioned in both branches of the 'if', but
+ Notice that s0 is mentioned in both branches of the 'if', but
only one of these two will actually be consumed. But if we
float out to
- let! s1 = writeMutVar s0 v True
- in if blah then s1 else s0
+ case writeMutVar# v True s0 of (# s1 #) ->
+ if blah then s1 else s0
the writeMutVar will be performed in both branches, which is
utterly wrong.
-* You cannot duplicate a has_side_effect primop. You might wonder
- how this can occur given the state token threading, but just look
- at Control.Monad.ST.Lazy.Imp.strictToLazy! We get something like
- this
+* Duplication. You cannot duplicate a has_side_effect primop. You
+ might wonder how this can occur given the state token threading, but
+ just look at Control.Monad.ST.Lazy.Imp.strictToLazy! We get
+ something like this
p = case readMutVar# s v of
(# s', r #) -> (S# s', r)
s' = case p of (s', r) -> s'
@@ -383,28 +443,28 @@ Consequences:
(All these bindings are boxed.) If we inline p at its two call
sites, we get a catastrophe: because the read is performed once when
- s' is demanded, and once when 'r' is demanded, which may be much
+ s' is demanded, and once when 'r' is demanded, which may be much
later. Utterly wrong. Trac #3207 is real example of this happening.
- However, it's fine to duplicate a can_fail primop. That is
- the difference between can_fail and has_side_effects.
+ However, it's fine to duplicate a can_fail primop. That is really
+ the only difference between can_fail and has_side_effects.
- can_fail has_side_effects
-Discard YES YES
-Float in YES YES
-Float out NO NO
-Duplicate YES NO
+Note [Implementation: how can_fail/has_side_effects affect transformations]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+How do we ensure that that floating/duplication/discarding are done right
+in the simplifier?
-How do we achieve these effects?
+Two main predicates on primpops test these flags:
+ primOpOkForSideEffects <=> not has_side_effects
+ primOpOkForSpeculation <=> not (has_side_effects || can_fail)
-Note [primOpOkForSpeculation]
* The "no-float-out" thing is achieved by ensuring that we never
let-bind a can_fail or has_side_effects primop. The RHS of a
let-binding (which can float in and out freely) satisfies
- exprOkForSpeculation. And exprOkForSpeculation is false of
- can_fail and no_side_effect.
+ exprOkForSpeculation; this is the let/app invariant. And
+ exprOkForSpeculation is false of can_fail and has_side_effects.
- * So can_fail and no_side_effect primops will appear only as the
+ * So can_fail and has_side_effects primops will appear only as the
scrutinees of cases, and that's why the FloatIn pass is capable
of floating case bindings inwards.
@@ -420,10 +480,14 @@ primOpCanFail :: PrimOp -> Bool
#include "primop-can-fail.hs-incl"
primOpOkForSpeculation :: PrimOp -> Bool
- -- See Note [primOpOkForSpeculation and primOpOkForFloatOut]
+ -- See Note [PrimOp can_fail and has_side_effects]
-- See comments with CoreUtils.exprOkForSpeculation
+ -- primOpOkForSpeculation => primOpOkForSideEffects
primOpOkForSpeculation op
- = not (primOpHasSideEffects op || primOpOutOfLine op || primOpCanFail op)
+ = primOpOkForSideEffects op
+ && not (primOpOutOfLine op || primOpCanFail op)
+ -- I think the "out of line" test is because out of line things can
+ -- be expensive (eg sine, cosine), and so we may not want to speculate them
primOpOkForSideEffects :: PrimOp -> Bool
primOpOkForSideEffects op
@@ -441,6 +505,7 @@ behaviour of 'seq' for primops that can fail, so we don't treat them as cheap.
\begin{code}
primOpIsCheap :: PrimOp -> Bool
+-- See Note [PrimOp can_fail and has_side_effects]
primOpIsCheap op = primOpOkForSpeculation op
-- In March 2001, we changed this to
-- primOpIsCheap op = False
@@ -585,7 +650,7 @@ pprPrimOp other_op = pprOccName (primOpOcc other_op)
%************************************************************************
\begin{code}
-data PrimCall = PrimCall CLabelString PackageId
+data PrimCall = PrimCall CLabelString PackageKey
instance Outputable PrimCall where
ppr (PrimCall lbl pkgId)
diff --git a/compiler/prelude/TysPrim.lhs b/compiler/prelude/TysPrim.lhs
index bbe5aba119dd..de151fd92f5c 100644
--- a/compiler/prelude/TysPrim.lhs
+++ b/compiler/prelude/TysPrim.lhs
@@ -6,7 +6,8 @@
\section[TysPrim]{Wired-in knowledge about primitive types}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -54,9 +55,11 @@ module TysPrim(
arrayPrimTyCon, mkArrayPrimTy,
byteArrayPrimTyCon, byteArrayPrimTy,
arrayArrayPrimTyCon, mkArrayArrayPrimTy,
+ smallArrayPrimTyCon, mkSmallArrayPrimTy,
mutableArrayPrimTyCon, mkMutableArrayPrimTy,
mutableByteArrayPrimTyCon, mkMutableByteArrayPrimTy,
mutableArrayArrayPrimTyCon, mkMutableArrayArrayPrimTy,
+ smallMutableArrayPrimTyCon, mkSmallMutableArrayPrimTy,
mutVarPrimTyCon, mkMutVarPrimTy,
mVarPrimTyCon, mkMVarPrimTy,
@@ -111,6 +114,7 @@ primTyCons
, arrayPrimTyCon
, byteArrayPrimTyCon
, arrayArrayPrimTyCon
+ , smallArrayPrimTyCon
, charPrimTyCon
, doublePrimTyCon
, floatPrimTyCon
@@ -122,6 +126,7 @@ primTyCons
, mutableArrayPrimTyCon
, mutableByteArrayPrimTyCon
, mutableArrayArrayPrimTyCon
+ , smallMutableArrayPrimTyCon
, mVarPrimTyCon
, tVarPrimTyCon
, mutVarPrimTyCon
@@ -154,9 +159,17 @@ mkPrimTc fs unique tycon
= mkWiredInName gHC_PRIM (mkTcOccFS fs)
unique
(ATyCon tycon) -- Relevant TyCon
- UserSyntax -- None are built-in syntax
+ UserSyntax
-charPrimTyConName, intPrimTyConName, int32PrimTyConName, int64PrimTyConName, wordPrimTyConName, word32PrimTyConName, word64PrimTyConName, addrPrimTyConName, floatPrimTyConName, doublePrimTyConName, statePrimTyConName, proxyPrimTyConName, realWorldTyConName, arrayPrimTyConName, arrayArrayPrimTyConName, byteArrayPrimTyConName, mutableArrayPrimTyConName, mutableByteArrayPrimTyConName, mutableArrayArrayPrimTyConName, mutVarPrimTyConName, mVarPrimTyConName, tVarPrimTyConName, stablePtrPrimTyConName, stableNamePrimTyConName, bcoPrimTyConName, weakPrimTyConName, threadIdPrimTyConName, eqPrimTyConName, eqReprPrimTyConName, voidPrimTyConName :: Name
+mkBuiltInPrimTc :: FastString -> Unique -> TyCon -> Name
+mkBuiltInPrimTc fs unique tycon
+ = mkWiredInName gHC_PRIM (mkTcOccFS fs)
+ unique
+ (ATyCon tycon) -- Relevant TyCon
+ BuiltInSyntax
+
+
+charPrimTyConName, intPrimTyConName, int32PrimTyConName, int64PrimTyConName, wordPrimTyConName, word32PrimTyConName, word64PrimTyConName, addrPrimTyConName, floatPrimTyConName, doublePrimTyConName, statePrimTyConName, proxyPrimTyConName, realWorldTyConName, arrayPrimTyConName, arrayArrayPrimTyConName, smallArrayPrimTyConName, byteArrayPrimTyConName, mutableArrayPrimTyConName, mutableByteArrayPrimTyConName, mutableArrayArrayPrimTyConName, smallMutableArrayPrimTyConName, mutVarPrimTyConName, mVarPrimTyConName, tVarPrimTyConName, stablePtrPrimTyConName, stableNamePrimTyConName, bcoPrimTyConName, weakPrimTyConName, threadIdPrimTyConName, eqPrimTyConName, eqReprPrimTyConName, voidPrimTyConName :: Name
charPrimTyConName = mkPrimTc (fsLit "Char#") charPrimTyConKey charPrimTyCon
intPrimTyConName = mkPrimTc (fsLit "Int#") intPrimTyConKey intPrimTyCon
int32PrimTyConName = mkPrimTc (fsLit "Int32#") int32PrimTyConKey int32PrimTyCon
@@ -171,14 +184,16 @@ statePrimTyConName = mkPrimTc (fsLit "State#") statePrimTyConKey stat
voidPrimTyConName = mkPrimTc (fsLit "Void#") voidPrimTyConKey voidPrimTyCon
proxyPrimTyConName = mkPrimTc (fsLit "Proxy#") proxyPrimTyConKey proxyPrimTyCon
eqPrimTyConName = mkPrimTc (fsLit "~#") eqPrimTyConKey eqPrimTyCon
-eqReprPrimTyConName = mkPrimTc (fsLit "~R#") eqReprPrimTyConKey eqReprPrimTyCon
+eqReprPrimTyConName = mkBuiltInPrimTc (fsLit "~R#") eqReprPrimTyConKey eqReprPrimTyCon
realWorldTyConName = mkPrimTc (fsLit "RealWorld") realWorldTyConKey realWorldTyCon
arrayPrimTyConName = mkPrimTc (fsLit "Array#") arrayPrimTyConKey arrayPrimTyCon
byteArrayPrimTyConName = mkPrimTc (fsLit "ByteArray#") byteArrayPrimTyConKey byteArrayPrimTyCon
arrayArrayPrimTyConName = mkPrimTc (fsLit "ArrayArray#") arrayArrayPrimTyConKey arrayArrayPrimTyCon
+smallArrayPrimTyConName = mkPrimTc (fsLit "SmallArray#") smallArrayPrimTyConKey smallArrayPrimTyCon
mutableArrayPrimTyConName = mkPrimTc (fsLit "MutableArray#") mutableArrayPrimTyConKey mutableArrayPrimTyCon
mutableByteArrayPrimTyConName = mkPrimTc (fsLit "MutableByteArray#") mutableByteArrayPrimTyConKey mutableByteArrayPrimTyCon
mutableArrayArrayPrimTyConName= mkPrimTc (fsLit "MutableArrayArray#") mutableArrayArrayPrimTyConKey mutableArrayArrayPrimTyCon
+smallMutableArrayPrimTyConName= mkPrimTc (fsLit "SmallMutableArray#") smallMutableArrayPrimTyConKey smallMutableArrayPrimTyCon
mutVarPrimTyConName = mkPrimTc (fsLit "MutVar#") mutVarPrimTyConKey mutVarPrimTyCon
mVarPrimTyConName = mkPrimTc (fsLit "MVar#") mVarPrimTyConKey mVarPrimTyCon
tVarPrimTyConName = mkPrimTc (fsLit "TVar#") tVarPrimTyConKey tVarPrimTyCon
@@ -538,13 +553,16 @@ defined in \tr{TysWiredIn.lhs}, not here.
\begin{code}
arrayPrimTyCon, mutableArrayPrimTyCon, mutableByteArrayPrimTyCon,
- byteArrayPrimTyCon, arrayArrayPrimTyCon, mutableArrayArrayPrimTyCon :: TyCon
+ byteArrayPrimTyCon, arrayArrayPrimTyCon, mutableArrayArrayPrimTyCon,
+ smallArrayPrimTyCon, smallMutableArrayPrimTyCon :: TyCon
arrayPrimTyCon = pcPrimTyCon arrayPrimTyConName [Representational] PtrRep
mutableArrayPrimTyCon = pcPrimTyCon mutableArrayPrimTyConName [Nominal, Representational] PtrRep
mutableByteArrayPrimTyCon = pcPrimTyCon mutableByteArrayPrimTyConName [Nominal] PtrRep
byteArrayPrimTyCon = pcPrimTyCon0 byteArrayPrimTyConName PtrRep
arrayArrayPrimTyCon = pcPrimTyCon0 arrayArrayPrimTyConName PtrRep
mutableArrayArrayPrimTyCon = pcPrimTyCon mutableArrayArrayPrimTyConName [Nominal] PtrRep
+smallArrayPrimTyCon = pcPrimTyCon smallArrayPrimTyConName [Representational] PtrRep
+smallMutableArrayPrimTyCon = pcPrimTyCon smallMutableArrayPrimTyConName [Nominal, Representational] PtrRep
mkArrayPrimTy :: Type -> Type
mkArrayPrimTy elt = TyConApp arrayPrimTyCon [elt]
@@ -552,12 +570,16 @@ byteArrayPrimTy :: Type
byteArrayPrimTy = mkTyConTy byteArrayPrimTyCon
mkArrayArrayPrimTy :: Type
mkArrayArrayPrimTy = mkTyConTy arrayArrayPrimTyCon
+mkSmallArrayPrimTy :: Type -> Type
+mkSmallArrayPrimTy elt = TyConApp smallArrayPrimTyCon [elt]
mkMutableArrayPrimTy :: Type -> Type -> Type
mkMutableArrayPrimTy s elt = TyConApp mutableArrayPrimTyCon [s, elt]
mkMutableByteArrayPrimTy :: Type -> Type
mkMutableByteArrayPrimTy s = TyConApp mutableByteArrayPrimTyCon [s]
mkMutableArrayArrayPrimTy :: Type -> Type
mkMutableArrayArrayPrimTy s = TyConApp mutableArrayArrayPrimTyCon [s]
+mkSmallMutableArrayPrimTy :: Type -> Type -> Type
+mkSmallMutableArrayPrimTy s elt = TyConApp smallMutableArrayPrimTyCon [s, elt]
\end{code}
%************************************************************************
@@ -687,7 +709,7 @@ threadIdPrimTyCon = pcPrimTyCon0 threadIdPrimTyConName PtrRep
Note [Any types]
~~~~~~~~~~~~~~~~
-The type constructor Any of kind forall k. k -> k has these properties:
+The type constructor Any of kind forall k. k has these properties:
* It is defined in module GHC.Prim, and exported so that it is
available to users. For this reason it's treated like any other
@@ -700,7 +722,7 @@ The type constructor Any of kind forall k. k -> k has these properties:
g :: ty ~ (Fst ty, Snd ty)
If Any was a *data* type, then we'd get inconsistency because 'ty'
could be (Any '(k1,k2)) and then we'd have an equality with Any on
- one side and '(,) on the other
+ one side and '(,) on the other. See also #9097.
* It is lifted, and hence represented by a pointer
@@ -757,20 +779,12 @@ anyTy :: Type
anyTy = mkTyConTy anyTyCon
anyTyCon :: TyCon
-anyTyCon = mkLiftedPrimTyCon anyTyConName kind [Nominal] PtrRep
- where kind = ForAllTy kKiVar (mkTyVarTy kKiVar)
-
-{- Can't do this yet without messing up kind proxies
--- RAE: I think you can now.
-anyTyCon :: TyCon
-anyTyCon = mkSynTyCon anyTyConName kind [kKiVar]
+anyTyCon = mkSynTyCon anyTyConName kind [kKiVar] [Nominal]
syn_rhs
NoParentTyCon
where
kind = ForAllTy kKiVar (mkTyVarTy kKiVar)
- syn_rhs = SynFamilyTyCon { synf_open = False, synf_injective = True }
- -- NB Closed, injective
--}
+ syn_rhs = AbstractClosedSynFamilyTyCon
anyTypeOfKind :: Kind -> Type
anyTypeOfKind kind = TyConApp anyTyCon [kind]
diff --git a/compiler/prelude/TysWiredIn.lhs b/compiler/prelude/TysWiredIn.lhs
index bf1907d161e0..4586b90cb227 100644
--- a/compiler/prelude/TysWiredIn.lhs
+++ b/compiler/prelude/TysWiredIn.lhs
@@ -4,11 +4,13 @@
\section[TysWiredIn]{Wired-in knowledge about {\em non-primitive} types}
\begin{code}
+{-# LANGUAGE CPP #-}
+
-- | This module is about types that can be defined in Haskell, but which
-- must be wired into the compiler nonetheless. C.f module TysPrim
module TysWiredIn (
-- * All wired in things
- wiredInTyCons,
+ wiredInTyCons, isBuiltInOcc_maybe,
-- * Bool
boolTy, boolTyCon, boolTyCon_RDR, boolTyConName,
@@ -20,6 +22,8 @@ module TysWiredIn (
ltDataCon, ltDataConId,
eqDataCon, eqDataConId,
gtDataCon, gtDataConId,
+ promotedOrderingTyCon,
+ promotedLTDataCon, promotedEQDataCon, promotedGTDataCon,
-- * Char
charTyCon, charDataCon, charTyCon_RDR,
@@ -174,10 +178,12 @@ mkWiredInDataConName built_in modu fs unique datacon
(AConLike (RealDataCon datacon)) -- Relevant DataCon
built_in
+-- See Note [Kind-changing of (~) and Coercible]
eqTyConName, eqBoxDataConName :: Name
eqTyConName = mkWiredInTyConName BuiltInSyntax gHC_TYPES (fsLit "~") eqTyConKey eqTyCon
eqBoxDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "Eq#") eqBoxDataConKey eqBoxDataCon
+-- See Note [Kind-changing of (~) and Coercible]
coercibleTyConName, coercibleDataConName :: Name
coercibleTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Coercible") coercibleTyConKey coercibleTyCon
coercibleDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "MkCoercible") coercibleDataConKey coercibleDataCon
@@ -329,11 +335,11 @@ typeSymbolKind = TyConApp (promoteTyCon typeSymbolKindCon) []
%************************************************************************
%* *
-\subsection[TysWiredIn-tuples]{The tuple types}
+ Stuff for dealing with tuples
%* *
%************************************************************************
-Note [How tuples work]
+Note [How tuples work] See also Note [Known-key names] in PrelNames
~~~~~~~~~~~~~~~~~~~~~~
* There are three families of tuple TyCons and corresponding
DataCons, (boxed, unboxed, and constraint tuples), expressed by the
@@ -352,6 +358,68 @@ Note [How tuples work]
are not serialised into interface files using OccNames at all.
\begin{code}
+isBuiltInOcc_maybe :: OccName -> Maybe Name
+-- Built in syntax isn't "in scope" so these OccNames
+-- map to wired-in Names with BuiltInSyntax
+isBuiltInOcc_maybe occ
+ = case occNameString occ of
+ "[]" -> choose_ns listTyCon nilDataCon
+ ":" -> Just consDataConName
+ "[::]" -> Just parrTyConName
+ "(##)" -> choose_ns unboxedUnitTyCon unboxedUnitDataCon
+ "()" -> choose_ns unitTyCon unitDataCon
+ '(':'#':',':rest -> parse_tuple UnboxedTuple 2 rest
+ '(':',':rest -> parse_tuple BoxedTuple 2 rest
+ _other -> Nothing
+ where
+ ns = occNameSpace occ
+
+ parse_tuple sort n rest
+ | (',' : rest2) <- rest = parse_tuple sort (n+1) rest2
+ | tail_matches sort rest = choose_ns (tupleTyCon sort n)
+ (tupleCon sort n)
+ | otherwise = Nothing
+
+ tail_matches BoxedTuple ")" = True
+ tail_matches UnboxedTuple "#)" = True
+ tail_matches _ _ = False
+
+ choose_ns tc dc
+ | isTcClsNameSpace ns = Just (getName tc)
+ | isDataConNameSpace ns = Just (getName dc)
+ | otherwise = Just (getName (dataConWorkId dc))
+
+mkTupleOcc :: NameSpace -> TupleSort -> Arity -> OccName
+mkTupleOcc ns sort ar = mkOccName ns str
+ where
+ -- No need to cache these, the caching is done in mk_tuple
+ str = case sort of
+ UnboxedTuple -> '(' : '#' : commas ++ "#)"
+ BoxedTuple -> '(' : commas ++ ")"
+ ConstraintTuple -> '(' : commas ++ ")"
+
+ commas = take (ar-1) (repeat ',')
+
+ -- Cute hack: we reuse the standard tuple OccNames (and hence code)
+ -- for fact tuples, but give them different Uniques so they are not equal.
+ --
+ -- You might think that this will go wrong because isBuiltInOcc_maybe won't
+ -- be able to tell the difference between boxed tuples and constraint tuples. BUT:
+ -- 1. Constraint tuples never occur directly in user code, so it doesn't matter
+ -- that we can't detect them in Orig OccNames originating from the user
+ -- programs (or those built by setRdrNameSpace used on an Exact tuple Name)
+ -- 2. Interface files have a special representation for tuple *occurrences*
+ -- in IfaceTyCons, their workers (in IfaceSyn) and their DataCons (in case
+ -- alternatives). Thus we don't rely on the OccName to figure out what kind
+ -- of tuple an occurrence was trying to use in these situations.
+ -- 3. We *don't* represent tuple data type declarations specially, so those
+ -- are still turned into wired-in names via isBuiltInOcc_maybe. But that's OK
+ -- because we don't actually need to declare constraint tuples thanks to this hack.
+ --
+ -- So basically any OccName like (,,) flowing to isBuiltInOcc_maybe will always
+ -- refer to the standard boxed tuple. Cool :-)
+
+
tupleTyCon :: TupleSort -> Arity -> TyCon
tupleTyCon sort i | i > mAX_TUPLE_SIZE = fst (mk_tuple sort i) -- Build one specially
tupleTyCon BoxedTuple i = fst (boxedTupleArr ! i)
@@ -384,7 +452,7 @@ mk_tuple sort arity = (tycon, tuple_con)
UnboxedTuple -> Nothing
ConstraintTuple -> Nothing
- modu = mkTupleModule sort arity
+ modu = mkTupleModule sort
tc_name = mkWiredInName modu (mkTupleOcc tcName sort arity) tc_uniq
(ATyCon tycon) BuiltInSyntax
tc_kind = mkArrowKinds (map tyVarKind tyvars) res_kind
@@ -831,5 +899,19 @@ promotedTrueDataCon = promoteDataCon trueDataCon
promotedFalseDataCon = promoteDataCon falseDataCon
\end{code}
+Promoted Ordering
+
+\begin{code}
+promotedOrderingTyCon
+ , promotedLTDataCon
+ , promotedEQDataCon
+ , promotedGTDataCon
+ :: TyCon
+promotedOrderingTyCon = promoteTyCon orderingTyCon
+promotedLTDataCon = promoteDataCon ltDataCon
+promotedEQDataCon = promoteDataCon eqDataCon
+promotedGTDataCon = promoteDataCon gtDataCon
+\end{code}
+
diff --git a/compiler/prelude/primops.txt.pp b/compiler/prelude/primops.txt.pp
index 36eec67b7d95..d5566fe3630d 100644
--- a/compiler/prelude/primops.txt.pp
+++ b/compiler/prelude/primops.txt.pp
@@ -247,13 +247,19 @@
primop IntNegOp "negateInt#" Monadic Int# -> Int#
primop IntAddCOp "addIntC#" GenPrimOp Int# -> Int# -> (# Int#, Int# #)
- {Add with carry. First member of result is (wrapped) sum;
- second member is 0 iff no overflow occured.}
+ {Add signed integers reporting overflow.
+ First member of result is the sum truncated to an {\tt Int#};
+ second member is zero if the true sum fits in an {\tt Int#},
+ nonzero if overflow occurred (the sum is either too large
+ or too small to fit in an {\tt Int#}).}
with code_size = 2
primop IntSubCOp "subIntC#" GenPrimOp Int# -> Int# -> (# Int#, Int# #)
- {Subtract with carry. First member of result is (wrapped) difference;
- second member is 0 iff no overflow occured.}
+ {Subtract signed integers reporting overflow.
+ First member of result is the difference truncated to an {\tt Int#};
+ second member is zero if the true difference fits in an {\tt Int#},
+ nonzero if overflow occurred (the difference is either too large
+ or too small to fit in an {\tt Int#}).}
with code_size = 2
primop IntGtOp ">#" Compare Int# -> Int# -> Int#
@@ -380,6 +386,28 @@
primop PopCntOp "popCnt#" Monadic Word# -> Word#
{Count the number of set bits in a word.}
+primop Clz8Op "clz8#" Monadic Word# -> Word#
+ {Count leading zeros in the lower 8 bits of a word.}
+primop Clz16Op "clz16#" Monadic Word# -> Word#
+ {Count leading zeros in the lower 16 bits of a word.}
+primop Clz32Op "clz32#" Monadic Word# -> Word#
+ {Count leading zeros in the lower 32 bits of a word.}
+primop Clz64Op "clz64#" GenPrimOp WORD64 -> Word#
+ {Count leading zeros in a 64-bit word.}
+primop ClzOp "clz#" Monadic Word# -> Word#
+ {Count leading zeros in a word.}
+
+primop Ctz8Op "ctz8#" Monadic Word# -> Word#
+ {Count trailing zeros in the lower 8 bits of a word.}
+primop Ctz16Op "ctz16#" Monadic Word# -> Word#
+ {Count trailing zeros in the lower 16 bits of a word.}
+primop Ctz32Op "ctz32#" Monadic Word# -> Word#
+ {Count trailing zeros in the lower 32 bits of a word.}
+primop Ctz64Op "ctz64#" GenPrimOp WORD64 -> Word#
+ {Count trailing zeros in a 64-bit word.}
+primop CtzOp "ctz#" Monadic Word# -> Word#
+ {Count trailing zeros in a word.}
+
primop BSwap16Op "byteSwap16#" Monadic Word# -> Word#
{Swap bytes in the lower 16 bits of a word. The higher bytes are undefined. }
primop BSwap32Op "byteSwap32#" Monadic Word# -> Word#
@@ -762,54 +790,74 @@
primop CopyArrayOp "copyArray#" GenPrimOp
Array# a -> Int# -> MutableArray# s a -> Int# -> Int# -> State# s -> State# s
- {Copy a range of the Array# to the specified region in the MutableArray#.
- Both arrays must fully contain the specified ranges, but this is not checked.
- The two arrays must not be the same array in different states, but this is not checked either.}
+ {Given a source array, an offset into the source array, a
+ destination array, an offset into the destination array, and a
+ number of elements to copy, copy the elements from the source array
+ to the destination array. Both arrays must fully contain the
+ specified ranges, but this is not checked. The two arrays must not
+ be the same array in different states, but this is not checked
+ either.}
with
+ out_of_line = True
has_side_effects = True
can_fail = True
- code_size = { primOpCodeSizeForeignCall + 4 }
primop CopyMutableArrayOp "copyMutableArray#" GenPrimOp
MutableArray# s a -> Int# -> MutableArray# s a -> Int# -> Int# -> State# s -> State# s
- {Copy a range of the first MutableArray# to the specified region in the second MutableArray#.
- Both arrays must fully contain the specified ranges, but this is not checked.}
+ {Given a source array, an offset into the source array, a
+ destination array, an offset into the destination array, and a
+ number of elements to copy, copy the elements from the source array
+ to the destination array. The source and destination arrays can
+ refer to the same array. Both arrays must fully contain the
+ specified ranges, but this is not checked.}
with
+ out_of_line = True
has_side_effects = True
can_fail = True
- code_size = { primOpCodeSizeForeignCall + 4 }
primop CloneArrayOp "cloneArray#" GenPrimOp
Array# a -> Int# -> Int# -> Array# a
- {Return a newly allocated Array# with the specified subrange of the provided Array#.
- The provided Array# should contain the full subrange specified by the two Int#s, but this is not checked.}
+ {Given a source array, an offset into the source array, and a number
+ of elements to copy, create a new array with the elements from the
+ source array. The provided array must fully contain the specified
+ range, but this is not checked.}
with
+ out_of_line = True
has_side_effects = True
- code_size = { primOpCodeSizeForeignCall + 4 }
+ can_fail = True
primop CloneMutableArrayOp "cloneMutableArray#" GenPrimOp
MutableArray# s a -> Int# -> Int# -> State# s -> (# State# s, MutableArray# s a #)
- {Return a newly allocated Array# with the specified subrange of the provided Array#.
- The provided MutableArray# should contain the full subrange specified by the two Int#s, but this is not checked.}
+ {Given a source array, an offset into the source array, and a number
+ of elements to copy, create a new array with the elements from the
+ source array. The provided array must fully contain the specified
+ range, but this is not checked.}
with
+ out_of_line = True
has_side_effects = True
- code_size = { primOpCodeSizeForeignCall + 4 }
+ can_fail = True
primop FreezeArrayOp "freezeArray#" GenPrimOp
MutableArray# s a -> Int# -> Int# -> State# s -> (# State# s, Array# a #)
- {Return a newly allocated Array# with the specified subrange of the provided MutableArray#.
- The provided MutableArray# should contain the full subrange specified by the two Int#s, but this is not checked.}
+ {Given a source array, an offset into the source array, and a number
+ of elements to copy, create a new array with the elements from the
+ source array. The provided array must fully contain the specified
+ range, but this is not checked.}
with
+ out_of_line = True
has_side_effects = True
- code_size = { primOpCodeSizeForeignCall + 4 }
+ can_fail = True
primop ThawArrayOp "thawArray#" GenPrimOp
Array# a -> Int# -> Int# -> State# s -> (# State# s, MutableArray# s a #)
- {Return a newly allocated Array# with the specified subrange of the provided MutableArray#.
- The provided Array# should contain the full subrange specified by the two Int#s, but this is not checked.}
+ {Given a source array, an offset into the source array, and a number
+ of elements to copy, create a new array with the elements from the
+ source array. The provided array must fully contain the specified
+ range, but this is not checked.}
with
+ out_of_line = True
has_side_effects = True
- code_size = { primOpCodeSizeForeignCall + 4 }
+ can_fail = True
primop CasArrayOp "casArray#" GenPrimOp
MutableArray# s a -> Int# -> a -> a -> State# s -> (# State# s, Int#, a #)
@@ -819,6 +867,168 @@
has_side_effects = True
+------------------------------------------------------------------------
+section "Small Arrays"
+
+ {Operations on {\tt SmallArray\#}. A {\tt SmallArray\#} works
+ just like an {\tt Array\#}, but with different space use and
+ performance characteristics (that are often useful with small
+ arrays). The {\tt SmallArray\#} and {\tt SmallMutableArray\#}
+ lack a `card table'. The purpose of a card table is to avoid
+ having to scan every element of the array on each GC by
+ keeping track of which elements have changed since the last GC
+ and only scanning those that have changed. So the consequence
+ of there being no card table is that the representation is
+ somewhat smaller and the writes are somewhat faster (because
+ the card table does not need to be updated). The disadvantage
+ of course is that for a {\tt SmallMutableArray\#} the whole
+ array has to be scanned on each GC. Thus it is best suited for
+ use cases where the mutable array is not long lived, e.g.
+ where a mutable array is initialised quickly and then frozen
+ to become an immutable {\tt SmallArray\#}.
+ }
+
+------------------------------------------------------------------------
+
+primtype SmallArray# a
+
+primtype SmallMutableArray# s a
+
+primop NewSmallArrayOp "newSmallArray#" GenPrimOp
+ Int# -> a -> State# s -> (# State# s, SmallMutableArray# s a #)
+ {Create a new mutable array with the specified number of elements,
+ in the specified state thread,
+ with each element containing the specified initial value.}
+ with
+ out_of_line = True
+ has_side_effects = True
+
+primop SameSmallMutableArrayOp "sameSmallMutableArray#" GenPrimOp
+ SmallMutableArray# s a -> SmallMutableArray# s a -> Int#
+
+primop ReadSmallArrayOp "readSmallArray#" GenPrimOp
+ SmallMutableArray# s a -> Int# -> State# s -> (# State# s, a #)
+ {Read from specified index of mutable array. Result is not yet evaluated.}
+ with
+ has_side_effects = True
+ can_fail = True
+
+primop WriteSmallArrayOp "writeSmallArray#" GenPrimOp
+ SmallMutableArray# s a -> Int# -> a -> State# s -> State# s
+ {Write to specified index of mutable array.}
+ with
+ has_side_effects = True
+ can_fail = True
+
+primop SizeofSmallArrayOp "sizeofSmallArray#" GenPrimOp
+ SmallArray# a -> Int#
+ {Return the number of elements in the array.}
+
+primop SizeofSmallMutableArrayOp "sizeofSmallMutableArray#" GenPrimOp
+ SmallMutableArray# s a -> Int#
+ {Return the number of elements in the array.}
+
+primop IndexSmallArrayOp "indexSmallArray#" GenPrimOp
+ SmallArray# a -> Int# -> (# a #)
+ {Read from specified index of immutable array. Result is packaged into
+ an unboxed singleton; the result itself is not yet evaluated.}
+ with
+ can_fail = True
+
+primop UnsafeFreezeSmallArrayOp "unsafeFreezeSmallArray#" GenPrimOp
+ SmallMutableArray# s a -> State# s -> (# State# s, SmallArray# a #)
+ {Make a mutable array immutable, without copying.}
+ with
+ has_side_effects = True
+
+primop UnsafeThawSmallArrayOp "unsafeThawSmallArray#" GenPrimOp
+ SmallArray# a -> State# s -> (# State# s, SmallMutableArray# s a #)
+ {Make an immutable array mutable, without copying.}
+ with
+ out_of_line = True
+ has_side_effects = True
+
+-- The code_size is only correct for the case when the copy family of
+-- primops aren't inlined. It would be nice to keep track of both.
+
+primop CopySmallArrayOp "copySmallArray#" GenPrimOp
+ SmallArray# a -> Int# -> SmallMutableArray# s a -> Int# -> Int# -> State# s -> State# s
+ {Given a source array, an offset into the source array, a
+ destination array, an offset into the destination array, and a
+ number of elements to copy, copy the elements from the source array
+ to the destination array. Both arrays must fully contain the
+ specified ranges, but this is not checked. The two arrays must not
+ be the same array in different states, but this is not checked
+ either.}
+ with
+ out_of_line = True
+ has_side_effects = True
+ can_fail = True
+
+primop CopySmallMutableArrayOp "copySmallMutableArray#" GenPrimOp
+ SmallMutableArray# s a -> Int# -> SmallMutableArray# s a -> Int# -> Int# -> State# s -> State# s
+ {Given a source array, an offset into the source array, a
+ destination array, an offset into the destination array, and a
+ number of elements to copy, copy the elements from the source array
+ to the destination array. The source and destination arrays can
+ refer to the same array. Both arrays must fully contain the
+ specified ranges, but this is not checked.}
+ with
+ out_of_line = True
+ has_side_effects = True
+ can_fail = True
+
+primop CloneSmallArrayOp "cloneSmallArray#" GenPrimOp
+ SmallArray# a -> Int# -> Int# -> SmallArray# a
+ {Given a source array, an offset into the source array, and a number
+ of elements to copy, create a new array with the elements from the
+ source array. The provided array must fully contain the specified
+ range, but this is not checked.}
+ with
+ out_of_line = True
+ has_side_effects = True
+ can_fail = True
+
+primop CloneSmallMutableArrayOp "cloneSmallMutableArray#" GenPrimOp
+ SmallMutableArray# s a -> Int# -> Int# -> State# s -> (# State# s, SmallMutableArray# s a #)
+ {Given a source array, an offset into the source array, and a number
+ of elements to copy, create a new array with the elements from the
+ source array. The provided array must fully contain the specified
+ range, but this is not checked.}
+ with
+ out_of_line = True
+ has_side_effects = True
+ can_fail = True
+
+primop FreezeSmallArrayOp "freezeSmallArray#" GenPrimOp
+ SmallMutableArray# s a -> Int# -> Int# -> State# s -> (# State# s, SmallArray# a #)
+ {Given a source array, an offset into the source array, and a number
+ of elements to copy, create a new array with the elements from the
+ source array. The provided array must fully contain the specified
+ range, but this is not checked.}
+ with
+ out_of_line = True
+ has_side_effects = True
+ can_fail = True
+
+primop ThawSmallArrayOp "thawSmallArray#" GenPrimOp
+ SmallArray# a -> Int# -> Int# -> State# s -> (# State# s, SmallMutableArray# s a #)
+ {Given a source array, an offset into the source array, and a number
+ of elements to copy, create a new array with the elements from the
+ source array. The provided array must fully contain the specified
+ range, but this is not checked.}
+ with
+ out_of_line = True
+ has_side_effects = True
+ can_fail = True
+
+primop CasSmallArrayOp "casSmallArray#" GenPrimOp
+ SmallMutableArray# s a -> Int# -> a -> a -> State# s -> (# State# s, Int#, a #)
+ {Unsafe, machine-level atomic compare and swap on an element within an array.}
+ with
+ out_of_line = True
+ has_side_effects = True
+
------------------------------------------------------------------------
section "Byte Arrays"
{Operations on {\tt ByteArray\#}. A {\tt ByteArray\#} is a just a region of
@@ -864,6 +1074,30 @@
primop SameMutableByteArrayOp "sameMutableByteArray#" GenPrimOp
MutableByteArray# s -> MutableByteArray# s -> Int#
+primop ShrinkMutableByteArrayOp_Char "shrinkMutableByteArray#" GenPrimOp
+ MutableByteArray# s -> Int# -> State# s -> State# s
+ {Shrink mutable byte array to new specified size (in bytes), in
+ the specified state thread. The new size argument must be less than or
+ equal to the current size as reported by {\tt sizeofMutableByteArray\#}.}
+ with out_of_line = True
+ has_side_effects = True
+
+primop ResizeMutableByteArrayOp_Char "resizeMutableByteArray#" GenPrimOp
+ MutableByteArray# s -> Int# -> State# s -> (# State# s,MutableByteArray# s #)
+ {Resize (unpinned) mutable byte array to new specified size (in bytes).
+ The returned {\tt MutableByteArray\#} is either the original
+ {\tt MutableByteArray\#} resized in-place or, if not possible, a newly
+ allocated (unpinned) {\tt MutableByteArray\#} (with the original content
+ copied over).
+
+ To avoid undefined behaviour, the original {\tt MutableByteArray\#} shall
+ not be accessed anymore after a {\tt resizeMutableByteArray\#} has been
+ performed. Moreover, no reference to the old one should be kept in order
+ to allow garbage collection of the original {\tt MutableByteArray\#} in
+ case a new {\tt MutableByteArray\#} had to be allocated.}
+ with out_of_line = True
+ has_side_effects = True
+
primop UnsafeFreezeByteArrayOp "unsafeFreezeByteArray#" GenPrimOp
MutableByteArray# s -> State# s -> (# State# s, ByteArray# #)
{Make a mutable byte array immutable, without copying.}
@@ -914,34 +1148,42 @@
primop IndexByteArrayOp_Int8 "indexInt8Array#" GenPrimOp
ByteArray# -> Int# -> Int#
+ {Read 8-bit integer; offset in bytes.}
with can_fail = True
primop IndexByteArrayOp_Int16 "indexInt16Array#" GenPrimOp
ByteArray# -> Int# -> Int#
+ {Read 16-bit integer; offset in 16-bit words.}
with can_fail = True
primop IndexByteArrayOp_Int32 "indexInt32Array#" GenPrimOp
ByteArray# -> Int# -> INT32
+ {Read 32-bit integer; offset in 32-bit words.}
with can_fail = True
primop IndexByteArrayOp_Int64 "indexInt64Array#" GenPrimOp
ByteArray# -> Int# -> INT64
+ {Read 64-bit integer; offset in 64-bit words.}
with can_fail = True
primop IndexByteArrayOp_Word8 "indexWord8Array#" GenPrimOp
ByteArray# -> Int# -> Word#
+ {Read 8-bit word; offset in bytes.}
with can_fail = True
primop IndexByteArrayOp_Word16 "indexWord16Array#" GenPrimOp
ByteArray# -> Int# -> Word#
+ {Read 16-bit word; offset in 16-bit words.}
with can_fail = True
primop IndexByteArrayOp_Word32 "indexWord32Array#" GenPrimOp
ByteArray# -> Int# -> WORD32
+ {Read 32-bit word; offset in 32-bit words.}
with can_fail = True
primop IndexByteArrayOp_Word64 "indexWord64Array#" GenPrimOp
ByteArray# -> Int# -> WORD64
+ {Read 64-bit word; offset in 64-bit words.}
with can_fail = True
primop ReadByteArrayOp_Char "readCharArray#" GenPrimOp
@@ -958,11 +1200,13 @@
primop ReadByteArrayOp_Int "readIntArray#" GenPrimOp
MutableByteArray# s -> Int# -> State# s -> (# State# s, Int# #)
+ {Read integer; offset in words.}
with has_side_effects = True
can_fail = True
primop ReadByteArrayOp_Word "readWordArray#" GenPrimOp
MutableByteArray# s -> Int# -> State# s -> (# State# s, Word# #)
+ {Read word; offset in words.}
with has_side_effects = True
can_fail = True
@@ -1171,19 +1415,79 @@
code_size = { primOpCodeSizeForeignCall + 4 }
can_fail = True
+-- Atomic operations
+
+primop AtomicReadByteArrayOp_Int "atomicReadIntArray#" GenPrimOp
+ MutableByteArray# s -> Int# -> State# s -> (# State# s, Int# #)
+ {Given an array and an offset in Int units, read an element. The
+ index is assumed to be in bounds. Implies a full memory barrier.}
+ with has_side_effects = True
+ can_fail = True
+
+primop AtomicWriteByteArrayOp_Int "atomicWriteIntArray#" GenPrimOp
+ MutableByteArray# s -> Int# -> Int# -> State# s -> State# s
+ {Given an array and an offset in Int units, write an element. The
+ index is assumed to be in bounds. Implies a full memory barrier.}
+ with has_side_effects = True
+ can_fail = True
+
primop CasByteArrayOp_Int "casIntArray#" GenPrimOp
MutableByteArray# s -> Int# -> Int# -> Int# -> State# s -> (# State# s, Int# #)
- {Machine-level atomic compare and swap on a word within a ByteArray.}
- with
- out_of_line = True
- has_side_effects = True
+ {Given an array, an offset in Int units, the expected old value, and
+ the new value, perform an atomic compare and swap i.e. write the new
+ value if the current value matches the provided old value. Returns
+ the value of the element before the operation. Implies a full memory
+ barrier.}
+ with has_side_effects = True
+ can_fail = True
primop FetchAddByteArrayOp_Int "fetchAddIntArray#" GenPrimOp
MutableByteArray# s -> Int# -> Int# -> State# s -> (# State# s, Int# #)
- {Machine-level word-sized fetch-and-add within a ByteArray.}
- with
- out_of_line = True
- has_side_effects = True
+ {Given an array, an offset in Int units, and a value to add,
+ atomically add the value to the element. Returns the value of the
+ element before the operation. Implies a full memory barrier.}
+ with has_side_effects = True
+ can_fail = True
+
+primop FetchSubByteArrayOp_Int "fetchSubIntArray#" GenPrimOp
+ MutableByteArray# s -> Int# -> Int# -> State# s -> (# State# s, Int# #)
+ {Given an array, an offset in Int units, and a value to subtract,
+ atomically subtract the value from the element. Returns the value of
+ the element before the operation. Implies a full memory barrier.}
+ with has_side_effects = True
+ can_fail = True
+
+primop FetchAndByteArrayOp_Int "fetchAndIntArray#" GenPrimOp
+ MutableByteArray# s -> Int# -> Int# -> State# s -> (# State# s, Int# #)
+ {Given an array, an offset in Int units, and a value to AND,
+ atomically AND the value to the element. Returns the value of the
+ element before the operation. Implies a full memory barrier.}
+ with has_side_effects = True
+ can_fail = True
+
+primop FetchNandByteArrayOp_Int "fetchNandIntArray#" GenPrimOp
+ MutableByteArray# s -> Int# -> Int# -> State# s -> (# State# s, Int# #)
+ {Given an array, an offset in Int units, and a value to NAND,
+ atomically NAND the value to the element. Returns the value of the
+ element before the operation. Implies a full memory barrier.}
+ with has_side_effects = True
+ can_fail = True
+
+primop FetchOrByteArrayOp_Int "fetchOrIntArray#" GenPrimOp
+ MutableByteArray# s -> Int# -> Int# -> State# s -> (# State# s, Int# #)
+ {Given an array, an offset in Int units, and a value to OR,
+ atomically OR the value to the element. Returns the value of the
+ element before the operation. Implies a full memory barrier.}
+ with has_side_effects = True
+ can_fail = True
+
+primop FetchXorByteArrayOp_Int "fetchXorIntArray#" GenPrimOp
+ MutableByteArray# s -> Int# -> Int# -> State# s -> (# State# s, Int# #)
+ {Given an array, an offset in Int units, and a value to XOR,
+ atomically XOR the value to the element. Returns the value of the
+ element before the operation. Implies a full memory barrier.}
+ with has_side_effects = True
+ can_fail = True
------------------------------------------------------------------------
@@ -1279,9 +1583,9 @@
Both arrays must fully contain the specified ranges, but this is not checked.
The two arrays must not be the same array in different states, but this is not checked either.}
with
+ out_of_line = True
has_side_effects = True
- can_fail = True
- code_size = { primOpCodeSizeForeignCall }
+ can_fail = True
primop CopyMutableArrayArrayOp "copyMutableArrayArray#" GenPrimOp
MutableArrayArray# s -> Int# -> MutableArrayArray# s -> Int# -> Int# -> State# s -> State# s
@@ -1289,9 +1593,9 @@
MutableArrayArray#.
Both arrays must fully contain the specified ranges, but this is not checked.}
with
+ out_of_line = True
has_side_effects = True
- code_size = { primOpCodeSizeForeignCall }
- can_fail = True
+ can_fail = True
------------------------------------------------------------------------
section "Addr#"
@@ -1629,6 +1933,11 @@
strictness = { \ _arity -> mkClosedStrictSig [topDmd] botRes }
-- NB: result is bottom
out_of_line = True
+ has_side_effects = True
+ -- raise# certainly throws a Haskell exception and hence has_side_effects
+ -- It doesn't actually make much difference because the fact that it
+ -- returns bottom independently ensures that we are careful not to discard
+ -- it. But still, it's better to say the Right Thing.
-- raiseIO# needs to be a primop, because exceptions in the IO monad
-- must be *precise* - we don't want the strictness analyser turning
@@ -2245,7 +2554,7 @@
{ Evaluates its first argument to head normal form, and then returns its second
argument as the result. }
-primtype Any k
+primtype Any
{ The type constructor {\tt Any} is type to which you can unsafely coerce any
lifted type, and back.
@@ -2270,8 +2579,11 @@
{\tt length (Any *) ([] (Any *))}
- Note that {\tt Any} is kind polymorphic, and takes a kind {\tt k} as its
- first argument. The kind of {\tt Any} is thus {\tt forall k. k -> k}.}
+ Above, we print kinds explicitly, as if with
+ {\tt -fprint-explicit-kinds}.
+
+ Note that {\tt Any} is kind polymorphic; its kind is thus
+ {\tt forall k. k}.}
primtype AnyK
{ The kind {\tt AnyK} is the kind level counterpart to {\tt Any}. In a
@@ -2364,49 +2676,6 @@
concrete types.
}
-primclass Coercible a b
- { This two-parameter class has instances for types {\tt a} and {\tt b} if
- the compiler can infer that they have the same representation. This class
- does not have regular instances; instead they are created on-the-fly during
- type-checking. Trying to manually declare an instance of {\tt Coercible}
- is an error.
-
- Nevertheless one can pretend that the following three kinds of instances
- exist. First, as a trivial base-case:
-
- {\tt instance a a}
-
- Furthermore, for every type constructor there is
- an instance that allows to coerce under the type constructor. For
- example, let {\tt D} be a prototypical type constructor ({\tt data} or {\tt
- newtype}) with three type arguments, which have roles Nominal,
- Representational resp. Phantom. Then there is an instance of the form
-
- {\tt instance Coercible b b' => Coercible (D a b c) (D a b' c')}
-
- Note that the nominal type arguments are equal, the representational type
- arguments can differ, but need to have a {\tt Coercible} instance
- themself, and the phantom type arguments can be changed arbitrarily.
-
- In SafeHaskell code, this instance is only usable if the constructors of
- every type constructor used in the definition of {\tt D} (including
- those of {\tt D} itself) are in scope.
-
- The third kind of instance exists for every {\tt newtype NT = MkNT T} and
- comes in two variants, namely
-
- {\tt instance Coercible a T => Coercible a NT}
-
- {\tt instance Coercible T b => Coercible NT b}
-
- This instance is only usable if the constructor {\tt MkNT} is in scope.
-
- If, as a library author of a type constructor like {\tt Set a}, you
- want to prevent a user of your module to write
- {\tt coerce :: Set T -> Set NT},
- you need to set the role of {\tt Set}'s type parameter to Nominal.
- }
-
------------------------------------------------------------------------
section "SIMD Vectors"
{Operations on SIMD vectors.}
diff --git a/compiler/profiling/CostCentre.lhs b/compiler/profiling/CostCentre.lhs
index fffd6462b24f..8a6ed044fb54 100644
--- a/compiler/profiling/CostCentre.lhs
+++ b/compiler/profiling/CostCentre.lhs
@@ -1,32 +1,24 @@
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
{-# LANGUAGE BangPatterns, DeriveDataTypeable #-}
-
module CostCentre (
CostCentre(..), CcName, IsCafCC(..),
- -- All abstract except to friend: ParseIface.y
+ -- All abstract except to friend: ParseIface.y
- CostCentreStack,
- CollectedCCs,
+ CostCentreStack,
+ CollectedCCs,
noCCS, currentCCS, dontCareCCS,
noCCSAttached, isCurrentCCS,
maybeSingletonCCS,
- mkUserCC, mkAutoCC, mkAllCafsCC,
+ mkUserCC, mkAutoCC, mkAllCafsCC,
mkSingletonCCS,
isCafCCS, isCafCC, isSccCountCC, sccAbleCC, ccFromThisModule,
- pprCostCentreCore,
+ pprCostCentreCore,
costCentreUserName, costCentreUserNameFS,
costCentreSrcSpan,
- cmpCostCentre -- used for removing dups in a list
+ cmpCostCentre -- used for removing dups in a list
) where
import Binary
@@ -34,7 +26,7 @@ import Var
import Name
import Module
import Unique
-import Outputable
+import Outputable
import FastTypes
import SrcLoc
import FastString
@@ -46,7 +38,7 @@ import Data.Data
-- Cost Centres
-- | A Cost Centre is a single @{-# SCC #-}@ annotation.
-
+
data CostCentre
= NormalCC {
cc_key :: {-# UNPACK #-} !Int,
@@ -66,7 +58,7 @@ data CostCentre
cc_is_caf :: IsCafCC -- see below
}
- | AllCafsCC {
+ | AllCafsCC {
cc_mod :: Module, -- Name of module defining this CC.
cc_loc :: SrcSpan
}
@@ -79,10 +71,10 @@ data IsCafCC = NotCafCC | CafCC
instance Eq CostCentre where
- c1 == c2 = case c1 `cmpCostCentre` c2 of { EQ -> True; _ -> False }
+ c1 == c2 = case c1 `cmpCostCentre` c2 of { EQ -> True; _ -> False }
instance Ord CostCentre where
- compare = cmpCostCentre
+ compare = cmpCostCentre
cmpCostCentre :: CostCentre -> CostCentre -> Ordering
@@ -96,8 +88,8 @@ cmpCostCentre NormalCC {cc_key = n1, cc_mod = m1}
cmpCostCentre other_1 other_2
= let
- !tag1 = tag_CC other_1
- !tag2 = tag_CC other_2
+ !tag1 = tag_CC other_1
+ !tag2 = tag_CC other_2
in
if tag1 <# tag2 then LT else GT
where
@@ -143,7 +135,7 @@ mkAutoCC id mod is_caf
cc_loc = nameSrcSpan (getName id),
cc_is_caf = is_caf
}
- where
+ where
name = getName id
-- beware: only external names are guaranteed to have unique
-- Occnames. If the name is not external, we must append its
@@ -161,28 +153,28 @@ mkAllCafsCC m loc = AllCafsCC { cc_mod = m, cc_loc = loc }
-- | A Cost Centre Stack is something that can be attached to a closure.
-- This is either:
---
+--
-- * the current cost centre stack (CCCS)
-- * a pre-defined cost centre stack (there are several
--- pre-defined CCSs, see below).
+-- pre-defined CCSs, see below).
data CostCentreStack
= NoCCS
- | CurrentCCS -- Pinned on a let(rec)-bound
- -- thunk/function/constructor, this says that the
- -- cost centre to be attached to the object, when it
- -- is allocated, is whatever is in the
- -- current-cost-centre-stack register.
+ | CurrentCCS -- Pinned on a let(rec)-bound
+ -- thunk/function/constructor, this says that the
+ -- cost centre to be attached to the object, when it
+ -- is allocated, is whatever is in the
+ -- current-cost-centre-stack register.
| DontCareCCS -- We need a CCS to stick in static closures
- -- (for data), but we *don't* expect them to
- -- accumulate any costs. But we still need
- -- the placeholder. This CCS is it.
+ -- (for data), but we *don't* expect them to
+ -- accumulate any costs. But we still need
+ -- the placeholder. This CCS is it.
| SingletonCCS CostCentre
- deriving (Eq, Ord) -- needed for Ord on CLabel
+ deriving (Eq, Ord) -- needed for Ord on CLabel
-- synonym for triple which describes the cost centre info in the generated
@@ -196,7 +188,7 @@ type CollectedCCs
noCCS, currentCCS, dontCareCCS :: CostCentreStack
-noCCS = NoCCS
+noCCS = NoCCS
currentCCS = CurrentCCS
dontCareCCS = DontCareCCS
@@ -204,20 +196,20 @@ dontCareCCS = DontCareCCS
-- Predicates on Cost-Centre Stacks
noCCSAttached :: CostCentreStack -> Bool
-noCCSAttached NoCCS = True
-noCCSAttached _ = False
+noCCSAttached NoCCS = True
+noCCSAttached _ = False
isCurrentCCS :: CostCentreStack -> Bool
-isCurrentCCS CurrentCCS = True
-isCurrentCCS _ = False
+isCurrentCCS CurrentCCS = True
+isCurrentCCS _ = False
isCafCCS :: CostCentreStack -> Bool
isCafCCS (SingletonCCS cc) = isCafCC cc
-isCafCCS _ = False
+isCafCCS _ = False
maybeSingletonCCS :: CostCentreStack -> Maybe CostCentre
maybeSingletonCCS (SingletonCCS cc) = Just cc
-maybeSingletonCCS _ = Nothing
+maybeSingletonCCS _ = Nothing
mkSingletonCCS :: CostCentre -> CostCentreStack
mkSingletonCCS cc = SingletonCCS cc
@@ -230,31 +222,31 @@ mkSingletonCCS cc = SingletonCCS cc
-- expression.
instance Outputable CostCentreStack where
- ppr NoCCS = ptext (sLit "NO_CCS")
- ppr CurrentCCS = ptext (sLit "CCCS")
+ ppr NoCCS = ptext (sLit "NO_CCS")
+ ppr CurrentCCS = ptext (sLit "CCCS")
ppr DontCareCCS = ptext (sLit "CCS_DONT_CARE")
ppr (SingletonCCS cc) = ppr cc <> ptext (sLit "_ccs")
-----------------------------------------------------------------------------
-- Printing Cost Centres
---
+--
-- There are several different ways in which we might want to print a
-- cost centre:
---
--- - the name of the cost centre, for profiling output (a C string)
--- - the label, i.e. C label for cost centre in .hc file.
--- - the debugging name, for output in -ddump things
--- - the interface name, for printing in _scc_ exprs in iface files.
---
+--
+-- - the name of the cost centre, for profiling output (a C string)
+-- - the label, i.e. C label for cost centre in .hc file.
+-- - the debugging name, for output in -ddump things
+-- - the interface name, for printing in _scc_ exprs in iface files.
+--
-- The last 3 are derived from costCentreStr below. The first is given
-- by costCentreName.
instance Outputable CostCentre where
ppr cc = getPprStyle $ \ sty ->
- if codeStyle sty
- then ppCostCentreLbl cc
- else text (costCentreUserName cc)
+ if codeStyle sty
+ then ppCostCentreLbl cc
+ else text (costCentreUserName cc)
-- Printing in Core
pprCostCentreCore :: CostCentre -> SDoc
@@ -281,7 +273,7 @@ ppCostCentreLbl (NormalCC {cc_key = k, cc_name = n, cc_mod = m,
= ppr m <> char '_' <> ztext (zEncodeFS n) <> char '_' <>
case is_caf of { CafCC -> ptext (sLit "CAF"); _ -> ppr (mkUniqueGrimily k)} <> text "_cc"
--- This is the name to go in the user-displayed string,
+-- This is the name to go in the user-displayed string,
-- recorded in the cost centre declaration
costCentreUserName :: CostCentre -> String
costCentreUserName = unpackFS . costCentreUserNameFS
diff --git a/compiler/profiling/SCCfinal.lhs b/compiler/profiling/SCCfinal.lhs
index fdcf7447ebb5..78e314ba60de 100644
--- a/compiler/profiling/SCCfinal.lhs
+++ b/compiler/profiling/SCCfinal.lhs
@@ -2,6 +2,8 @@
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
%
\begin{code}
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
-- Modify and collect code generation for final STG program
@@ -32,6 +34,7 @@ import UniqSupply ( UniqSupply )
import ListSetOps ( removeDups )
import Outputable
import DynFlags
+import CoreSyn ( Tickish(..) )
import FastString
import SrcLoc
import Util
@@ -93,7 +96,7 @@ stgMassageForProfiling dflags mod_name _us stg_binds
do_top_rhs :: Id -> StgRhs -> MassageM StgRhs
do_top_rhs _ (StgRhsClosure _ _ _ _ _ []
- (StgSCC _cc False{-not tick-} _push (StgConApp con args)))
+ (StgTick (ProfNote _cc False{-not tick-} _push) (StgConApp con args)))
| not (isDllConApp dflags mod_name con args)
-- Trivial _scc_ around nothing but static data
-- Eliminate _scc_ ... and turn into StgRhsCon
@@ -146,10 +149,14 @@ stgMassageForProfiling dflags mod_name _us stg_binds
do_expr (StgOpApp con args res_ty)
= return (StgOpApp con args res_ty)
- do_expr (StgSCC cc tick push expr) = do -- Ha, we found a cost centre!
+ do_expr (StgTick note@(ProfNote cc _ _) expr) = do -- Ha, we found a cost centre!
collectCC cc
expr' <- do_expr expr
- return (StgSCC cc tick push expr')
+ return (StgTick note expr')
+
+ do_expr (StgTick ti expr) = do
+ expr' <- do_expr expr
+ return (StgTick ti expr')
do_expr (StgCase expr fv1 fv2 bndr srt alt_type alts) = do
expr' <- do_expr expr
@@ -168,10 +175,6 @@ stgMassageForProfiling dflags mod_name _us stg_binds
(b,e) <- do_let b e
return (StgLetNoEscape lvs1 lvs2 b e)
- do_expr (StgTick m n expr) = do
- expr' <- do_expr expr
- return (StgTick m n expr')
-
do_expr other = pprPanic "SCCfinal.do_expr" (ppr other)
----------------------------------
@@ -201,7 +204,7 @@ stgMassageForProfiling dflags mod_name _us stg_binds
-- We should really attach (PushCC cc CurrentCCS) to the rhs,
-- but need to reinstate PushCC for that.
do_rhs (StgRhsClosure _closure_cc _bi _fv _u _srt []
- (StgSCC cc False{-not tick-} _push (StgConApp con args)))
+ (StgTick (ProfNote cc False{-not tick-} _push) (StgConApp con args)))
= do collectCC cc
return (StgRhsCon currentCCS con args)
diff --git a/compiler/rename/RnBinds.lhs b/compiler/rename/RnBinds.lhs
index ed1343f23d42..0f9f44aed6c7 100644
--- a/compiler/rename/RnBinds.lhs
+++ b/compiler/rename/RnBinds.lhs
@@ -9,7 +9,7 @@ type-synonym declarations; those cannot be done at this stage because
they may be affected by renaming (which isn't fully worked out yet).
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -47,7 +47,7 @@ import NameSet
import RdrName ( RdrName, rdrNameOcc )
import SrcLoc
import ListSetOps ( findDupsEq )
-import BasicTypes ( RecFlag(..), Origin )
+import BasicTypes ( RecFlag(..) )
import Digraph ( SCC(..) )
import Bag
import Outputable
@@ -275,7 +275,7 @@ rnValBindsLHS :: NameMaker
-> HsValBinds RdrName
-> RnM (HsValBindsLR Name RdrName)
rnValBindsLHS topP (ValBindsIn mbinds sigs)
- = do { mbinds' <- mapBagM (wrapOriginLocM (rnBindLHS topP doc)) mbinds
+ = do { mbinds' <- mapBagM (wrapLocM (rnBindLHS topP doc)) mbinds
; return $ ValBindsIn mbinds' sigs }
where
bndrs = collectHsBindsBinders mbinds
@@ -433,20 +433,27 @@ rnBindLHS name_maker _ bind@(FunBind { fun_id = name@(L nameLoc _) })
= do { newname <- applyNameMaker name_maker name
; return (bind { fun_id = L nameLoc newname }) }
-rnBindLHS name_maker _ bind@(PatSynBind{ patsyn_id = rdrname@(L nameLoc _) })
- = do { addLocM checkConName rdrname
+rnBindLHS name_maker _ (PatSynBind psb@PSB{ psb_id = rdrname@(L nameLoc _) })
+ = do { unless (isTopRecNameMaker name_maker) $
+ addErr localPatternSynonymErr
+ ; addLocM checkConName rdrname
; name <- applyNameMaker name_maker rdrname
- ; return (bind{ patsyn_id = L nameLoc name }) }
+ ; return (PatSynBind psb{ psb_id = L nameLoc name }) }
+ where
+ localPatternSynonymErr :: SDoc
+ localPatternSynonymErr
+ = hang (ptext (sLit "Illegal pattern synonym declaration"))
+ 2 (ptext (sLit "Pattern synonym declarations are only valid in the top-level scope"))
rnBindLHS _ _ b = pprPanic "rnBindHS" (ppr b)
rnLBind :: (Name -> [Name]) -- Signature tyvar function
- -> (Origin, LHsBindLR Name RdrName)
- -> RnM ((Origin, LHsBind Name), [Name], Uses)
-rnLBind sig_fn (origin, (L loc bind))
+ -> LHsBindLR Name RdrName
+ -> RnM (LHsBind Name, [Name], Uses)
+rnLBind sig_fn (L loc bind)
= setSrcSpan loc $
do { (bind', bndrs, dus) <- rnBind sig_fn bind
- ; return ((origin, L loc bind'), bndrs, dus) }
+ ; return (L loc bind', bndrs, dus) }
-- assumes the left-hands-side vars are in scope
rnBind :: (Name -> [Name]) -- Signature tyvar function
@@ -469,8 +476,9 @@ rnBind _ bind@(PatBind { pat_lhs = pat
bndrs = collectPatBinders pat
bind' = bind { pat_rhs = grhss', bind_fvs = fvs' }
is_wild_pat = case pat of
- L _ (WildPat {}) -> True
- _ -> False
+ L _ (WildPat {}) -> True
+ L _ (BangPat (L _ (WildPat {}))) -> True -- #9127
+ _ -> False
-- Warn if the pattern binds no variables, except for the
-- entirely-explicit idiom _ = rhs
@@ -507,15 +515,37 @@ rnBind sig_fn bind@(FunBind { fun_id = name
[plain_name], rhs_fvs)
}
-rnBind _sig_fn bind@(PatSynBind { patsyn_id = L _ name
- , patsyn_args = details
- , patsyn_def = pat
- , patsyn_dir = dir })
+rnBind sig_fn (PatSynBind bind)
+ = do { (bind', name, fvs) <- rnPatSynBind sig_fn bind
+ ; return (PatSynBind bind', name, fvs) }
+
+rnBind _ b = pprPanic "rnBind" (ppr b)
+
+{-
+Note [Free-variable space leak]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We have
+ fvs' = trim fvs
+and we seq fvs' before turning it as part of a record.
+
+The reason is that trim is sometimes something like
+ \xs -> intersectNameSet (mkNameSet bound_names) xs
+and we don't want to retain the list bound_names. This showed up in
+trac ticket #1136.
+-}
+
+rnPatSynBind :: (Name -> [Name]) -- Signature tyvar function
+ -> PatSynBind Name RdrName
+ -> RnM (PatSynBind Name Name, [Name], Uses)
+rnPatSynBind _sig_fn bind@(PSB { psb_id = L _ name
+ , psb_args = details
+ , psb_def = pat
+ , psb_dir = dir })
-- invariant: no free vars here when it's a FunBind
= do { pattern_synonym_ok <- xoptM Opt_PatternSynonyms
; unless pattern_synonym_ok (addErr patternSynonymErr)
- ; ((pat', details'), fvs) <- rnPat PatSyn pat $ \pat' -> do
+ ; ((pat', details'), fvs1) <- rnPat PatSyn pat $ \pat' -> do
-- We check the 'RdrName's instead of the 'Name's
-- so that the binding locations are reported
-- from the left-hand side
@@ -531,23 +561,28 @@ rnBind _sig_fn bind@(PatSynBind { patsyn_id = L _ name
-- ; checkPrecMatch -- TODO
; return (InfixPatSyn name1 name2, mkFVs (map unLoc [name1, name2])) }
; return ((pat', details'), fvs) }
- ; dir' <- case dir of
- Unidirectional -> return Unidirectional
- ImplicitBidirectional -> return ImplicitBidirectional
+ ; (dir', fvs2) <- case dir of
+ Unidirectional -> return (Unidirectional, emptyFVs)
+ ImplicitBidirectional -> return (ImplicitBidirectional, emptyFVs)
+ ExplicitBidirectional mg ->
+ do { (mg', fvs) <- rnMatchGroup PatSyn rnLExpr mg
+ ; return (ExplicitBidirectional mg', fvs) }
; mod <- getModule
- ; let fvs' = filterNameSet (nameIsLocalOrFrom mod) fvs
+ ; let fvs = fvs1 `plusFV` fvs2
+ fvs' = filterNameSet (nameIsLocalOrFrom mod) fvs
-- Keep locally-defined Names
-- As well as dependency analysis, we need these for the
-- MonoLocalBinds test in TcBinds.decideGeneralisationPlan
- ; let bind' = bind{ patsyn_args = details'
- , patsyn_def = pat'
- , patsyn_dir = dir'
- , bind_fvs = fvs' }
+ ; let bind' = bind{ psb_args = details'
+ , psb_def = pat'
+ , psb_dir = dir'
+ , psb_fvs = fvs' }
; fvs' `seq` -- See Note [Free-variable space leak]
- return (bind', [name], fvs)
+ return (bind', [name], fvs1)
+ -- See Note [Pattern synonym wrappers don't yield dependencies]
}
where
lookupVar = wrapLocM lookupOccRn
@@ -557,24 +592,38 @@ rnBind _sig_fn bind@(PatSynBind { patsyn_id = L _ name
= hang (ptext (sLit "Illegal pattern synonym declaration"))
2 (ptext (sLit "Use -XPatternSynonyms to enable this extension"))
+{-
+Note [Pattern synonym wrappers don't yield dependencies]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-rnBind _ b = pprPanic "rnBind" (ppr b)
+When renaming a pattern synonym that has an explicit wrapper,
+references in the wrapper definition should not be used when
+calculating dependencies. For example, consider the following pattern
+synonym definition:
-{-
-Note [Free-variable space leak]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-We have
- fvs' = trim fvs
-and we seq fvs' before turning it as part of a record.
+pattern P x <- C1 x where
+ P x = f (C1 x)
+
+f (P x) = C2 x
+
+In this case, 'P' needs to be typechecked in two passes:
+
+1. Typecheck the pattern definition of 'P', which fully determines the
+type of 'P'. This step doesn't require knowing anything about 'f',
+since the wrapper definition is not looked at.
+
+2. Typecheck the wrapper definition, which needs the typechecked
+definition of 'f' to be in scope.
+
+This behaviour is implemented in 'tcValBinds', but it crucially
+depends on 'P' not being put in a recursive group with 'f' (which
+would make it look like a recursive pattern synonym a la 'pattern P =
+P' which is unsound and rejected).
-The reason is that trim is sometimes something like
- \xs -> intersectNameSet (mkNameSet bound_names) xs
-and we don't want to retain the list bound_names. This showed up in
-trac ticket #1136.
-}
---------------------
-depAnalBinds :: Bag ((Origin, LHsBind Name), [Name], Uses)
+depAnalBinds :: Bag (LHsBind Name, [Name], Uses)
-> ([(RecFlag, LHsBinds Name)], DefUses)
-- Dependency analysis; this is important so that
-- unused-binding reporting is accurate
@@ -659,10 +708,9 @@ rnMethodBinds cls sig_fn binds
; foldlM do_one (emptyBag, emptyFVs) (bagToList binds) }
where
meth_names = collectMethodBinders binds
- do_one (binds,fvs) (origin,bind)
+ do_one (binds,fvs) bind
= do { (bind', fvs_bind) <- rnMethodBind cls sig_fn bind
- ; let bind'' = mapBag (\bind -> (origin,bind)) bind'
- ; return (binds `unionBags` bind'', fvs_bind `plusFV` fvs) }
+ ; return (binds `unionBags` bind', fvs_bind `plusFV` fvs) }
rnMethodBind :: Name
-> (Name -> [Name])
@@ -670,7 +718,7 @@ rnMethodBind :: Name
-> RnM (Bag (LHsBindLR Name Name), FreeVars)
rnMethodBind cls sig_fn
(L loc bind@(FunBind { fun_id = name, fun_infix = is_infix
- , fun_matches = MG { mg_alts = matches } }))
+ , fun_matches = MG { mg_alts = matches, mg_origin = origin } }))
= setSrcSpan loc $ do
sel_name <- wrapLocM (lookupInstDeclBndr cls (ptext (sLit "method"))) name
let plain_name = unLoc sel_name
@@ -678,7 +726,7 @@ rnMethodBind cls sig_fn
(new_matches, fvs) <- bindSigTyVarsFV (sig_fn plain_name) $
mapFvRn (rnMatch (FunRhs plain_name is_infix) rnLExpr) matches
- let new_group = mkMatchGroup new_matches
+ let new_group = mkMatchGroup origin new_matches
when is_infix $ checkPrecMatch plain_name new_group
return (unitBag (L loc (bind { fun_id = sel_name
@@ -882,11 +930,11 @@ rnMatchGroup :: Outputable (body RdrName) => HsMatchContext Name
-> (Located (body RdrName) -> RnM (Located (body Name), FreeVars))
-> MatchGroup RdrName (Located (body RdrName))
-> RnM (MatchGroup Name (Located (body Name)), FreeVars)
-rnMatchGroup ctxt rnBody (MG { mg_alts = ms })
+rnMatchGroup ctxt rnBody (MG { mg_alts = ms, mg_origin = origin })
= do { empty_case_ok <- xoptM Opt_EmptyCase
; when (null ms && not empty_case_ok) (addErr (emptyCaseErr ctxt))
; (new_ms, ms_fvs) <- mapFvRn (rnMatch ctxt rnBody) ms
- ; return (mkMatchGroup new_ms, ms_fvs) }
+ ; return (mkMatchGroup origin new_ms, ms_fvs) }
rnMatch :: Outputable (body RdrName) => HsMatchContext Name
-> (Located (body RdrName) -> RnM (Located (body Name), FreeVars))
diff --git a/compiler/rename/RnEnv.lhs b/compiler/rename/RnEnv.lhs
index f0d184097022..f333a239a195 100644
--- a/compiler/rename/RnEnv.lhs
+++ b/compiler/rename/RnEnv.lhs
@@ -4,6 +4,8 @@
\section[RnEnv]{Environment manipulation for the renamer monad}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module RnEnv (
newTopSrcBinder,
lookupLocatedTopBndrRn, lookupTopBndrRn,
@@ -38,10 +40,7 @@ module RnEnv (
warnUnusedMatches,
warnUnusedTopBinds, warnUnusedLocalBinds,
dataTcOccs, kindSigErr, perhapsForallMsg,
- HsDocContext(..), docOfHsDocContext,
-
- -- FsEnv
- FastStringEnv, emptyFsEnv, lookupFsEnv, extendFsEnv, mkFsEnv
+ HsDocContext(..), docOfHsDocContext
) where
#include "HsVersions.h"
@@ -59,7 +58,6 @@ import NameSet
import NameEnv
import Avail
import Module
-import UniqFM
import ConLike
import DataCon ( dataConFieldLabels, dataConTyCon )
import TyCon ( isTupleTyCon, tyConArity )
@@ -270,14 +268,29 @@ lookupExactOcc name
; return name
}
- [gre] -> return (gre_name gre)
- _ -> pprPanic "lookupExactOcc" (ppr name $$ ppr gres) }
+ [gre] -> return (gre_name gre)
+ (gre:_) -> do {addErr dup_nm_err
+ ; return (gre_name gre)
+ }
+ -- We can get more than one GRE here, if there are multiple
+ -- bindings for the same name. Sometimes they are caught later
+ -- by findLocalDupsRdrEnv, like in this example (Trac #8932):
+ -- $( [d| foo :: a->a; foo x = x |])
+ -- foo = True
+ -- But when the names are totally identical, we panic (Trac #7241):
+ -- $(newName "Foo" >>= \o -> return [DataD [] o [] [RecC o []] [''Show]])
+ -- So, let's emit an error here, even if it will lead to duplication in some cases.
+ }
where
exact_nm_err = hang (ptext (sLit "The exact Name") <+> quotes (ppr name) <+> ptext (sLit "is not in scope"))
2 (vcat [ ptext (sLit "Probable cause: you used a unique Template Haskell name (NameU), ")
, ptext (sLit "perhaps via newName, but did not bind it")
, ptext (sLit "If that's it, then -ddump-splices might be useful") ])
+ dup_nm_err = hang (ptext (sLit "Duplicate exact Name") <+> quotes (ppr $ nameOccName name))
+ 2 (vcat [ ptext (sLit "Probable cause: you used a unique Template Haskell name (NameU), ")
+ , ptext (sLit "perhaps via newName, but bound it multiple times")
+ , ptext (sLit "If that's it, then -ddump-splices might be useful") ])
-----------------------------------------------
lookupInstDeclBndr :: Name -> SDoc -> RdrName -> RnM Name
@@ -830,7 +843,7 @@ as if there was an "import qualified M" declaration for every
module.
If we fail we just return Nothing, rather than bleating
-about "attempting to use module ‛D’ (./D.hs) which is not loaded"
+about "attempting to use module ‘D’ (./D.hs) which is not loaded"
which is what loadSrcInterface does.
Note [Safe Haskell and GHCi]
@@ -1071,20 +1084,6 @@ The extended lookup is also used in other places, like resolution of
deprecation declarations, and lookup of names in GHCi.
\begin{code}
---------------------------------
-type FastStringEnv a = UniqFM a -- Keyed by FastString
-
-
-emptyFsEnv :: FastStringEnv a
-lookupFsEnv :: FastStringEnv a -> FastString -> Maybe a
-extendFsEnv :: FastStringEnv a -> FastString -> a -> FastStringEnv a
-mkFsEnv :: [(FastString,a)] -> FastStringEnv a
-
-emptyFsEnv = emptyUFM
-lookupFsEnv = lookupUFM
-extendFsEnv = addToUFM
-mkFsEnv = listToUFM
-
--------------------------------
type MiniFixityEnv = FastStringEnv (Located Fixity)
-- Mini fixity env for the names we're about
@@ -1103,7 +1102,7 @@ type MiniFixityEnv = FastStringEnv (Located Fixity)
addLocalFixities :: MiniFixityEnv -> [Name] -> RnM a -> RnM a
addLocalFixities mini_fix_env names thing_inside
- = extendFixityEnv (mapCatMaybes find_fixity names) thing_inside
+ = extendFixityEnv (mapMaybe find_fixity names) thing_inside
where
find_fixity name
= case lookupFsEnv mini_fix_env (occNameFS occ) of
@@ -1453,7 +1452,7 @@ unknownNameSuggestErr where_look tried_rdr_name
all_possibilities
= [ (showPpr dflags r, (r, Left loc))
| (r,loc) <- local_possibilities local_env ]
- ++ [ (showPpr dflags r, rp) | (r,rp) <- global_possibilities global_env ]
+ ++ [ (showPpr dflags r, rp) | (r, rp) <- global_possibilities global_env ]
suggest = fuzzyLookup (showPpr dflags tried_rdr_name) all_possibilities
perhaps = ptext (sLit "Perhaps you meant")
@@ -1465,19 +1464,24 @@ unknownNameSuggestErr where_look tried_rdr_name
; return extra_err }
where
pp_item :: (RdrName, HowInScope) -> SDoc
- pp_item (rdr, Left loc) = quotes (ppr rdr) <+> loc' -- Locally defined
+ pp_item (rdr, Left loc) = pp_ns rdr <+> quotes (ppr rdr) <+> loc' -- Locally defined
where loc' = case loc of
UnhelpfulSpan l -> parens (ppr l)
RealSrcSpan l -> parens (ptext (sLit "line") <+> int (srcSpanStartLine l))
- pp_item (rdr, Right is) = quotes (ppr rdr) <+> -- Imported
+ pp_item (rdr, Right is) = pp_ns rdr <+> quotes (ppr rdr) <+> -- Imported
parens (ptext (sLit "imported from") <+> ppr (is_mod is))
+ pp_ns :: RdrName -> SDoc
+ pp_ns rdr | ns /= tried_ns = pprNameSpace ns
+ | otherwise = empty
+ where ns = rdrNameSpace rdr
+
tried_occ = rdrNameOcc tried_rdr_name
tried_is_sym = isSymOcc tried_occ
tried_ns = occNameSpace tried_occ
tried_is_qual = isQual tried_rdr_name
- correct_name_space occ = occNameSpace occ == tried_ns
+ correct_name_space occ = nameSpacesRelated (occNameSpace occ) tried_ns
&& isSymOcc occ == tried_is_sym
-- Treat operator and non-operators as non-matching
-- This heuristic avoids things like
diff --git a/compiler/rename/RnExpr.lhs b/compiler/rename/RnExpr.lhs
index 01e8a4492d7c..697303f276e8 100644
--- a/compiler/rename/RnExpr.lhs
+++ b/compiler/rename/RnExpr.lhs
@@ -10,6 +10,8 @@ general, all of these functions return a renamed thing, and a set of
free variables.
\begin{code}
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
+
module RnExpr (
rnLExpr, rnExpr, rnStmts
) where
@@ -38,6 +40,7 @@ import UniqSet
import Data.List
import Util
import ListSetOps ( removeDups )
+import ErrUtils
import Outputable
import SrcLoc
import FastString
@@ -45,16 +48,6 @@ import Control.Monad
import TysWiredIn ( nilDataConName )
\end{code}
-
-\begin{code}
--- XXX
-thenM :: Monad a => a b -> (b -> a c) -> a c
-thenM = (>>=)
-
-thenM_ :: Monad a => a b -> a c -> a c
-thenM_ = (>>)
-\end{code}
-
%************************************************************************
%* *
\subsubsection{Expressions}
@@ -66,16 +59,13 @@ rnExprs :: [LHsExpr RdrName] -> RnM ([LHsExpr Name], FreeVars)
rnExprs ls = rnExprs' ls emptyUniqSet
where
rnExprs' [] acc = return ([], acc)
- rnExprs' (expr:exprs) acc
- = rnLExpr expr `thenM` \ (expr', fvExpr) ->
-
+ rnExprs' (expr:exprs) acc =
+ do { (expr', fvExpr) <- rnLExpr expr
-- Now we do a "seq" on the free vars because typically it's small
-- or empty, especially in very long lists of constants
- let
- acc' = acc `plusFV` fvExpr
- in
- acc' `seq` rnExprs' exprs acc' `thenM` \ (exprs', fvExprs) ->
- return (expr':exprs', fvExprs)
+ ; let acc' = acc `plusFV` fvExpr
+ ; (exprs', fvExprs) <- acc' `seq` rnExprs' exprs acc'
+ ; return (expr':exprs', fvExprs) }
\end{code}
Variables. We look up the variable and return the resulting name.
@@ -120,27 +110,25 @@ rnExpr (HsIPVar v)
= return (HsIPVar v, emptyFVs)
rnExpr (HsLit lit@(HsString s))
- = do {
- opt_OverloadedStrings <- xoptM Opt_OverloadedStrings
+ = do { opt_OverloadedStrings <- xoptM Opt_OverloadedStrings
; if opt_OverloadedStrings then
rnExpr (HsOverLit (mkHsIsString s placeHolderType))
- else -- Same as below
- rnLit lit `thenM_`
- return (HsLit lit, emptyFVs)
- }
+ else do {
+ ; rnLit lit
+ ; return (HsLit lit, emptyFVs) } }
rnExpr (HsLit lit)
- = rnLit lit `thenM_`
- return (HsLit lit, emptyFVs)
+ = do { rnLit lit
+ ; return (HsLit lit, emptyFVs) }
rnExpr (HsOverLit lit)
- = rnOverLit lit `thenM` \ (lit', fvs) ->
- return (HsOverLit lit', fvs)
+ = do { (lit', fvs) <- rnOverLit lit
+ ; return (HsOverLit lit', fvs) }
rnExpr (HsApp fun arg)
- = rnLExpr fun `thenM` \ (fun',fvFun) ->
- rnLExpr arg `thenM` \ (arg',fvArg) ->
- return (HsApp fun' arg', fvFun `plusFV` fvArg)
+ = do { (fun',fvFun) <- rnLExpr fun
+ ; (arg',fvArg) <- rnLExpr arg
+ ; return (HsApp fun' arg', fvFun `plusFV` fvArg) }
rnExpr (OpApp e1 (L op_loc (HsVar op_rdr)) _ e2)
= do { (e1', fv_e1) <- rnLExpr e1
@@ -163,10 +151,10 @@ rnExpr (OpApp _ other_op _ _)
, ptext (sLit "(Probably resulting from a Template Haskell splice)") ])
rnExpr (NegApp e _)
- = rnLExpr e `thenM` \ (e', fv_e) ->
- lookupSyntaxName negateName `thenM` \ (neg_name, fv_neg) ->
- mkNegAppRn e' neg_name `thenM` \ final_e ->
- return (final_e, fv_e `plusFV` fv_neg)
+ = do { (e', fv_e) <- rnLExpr e
+ ; (neg_name, fv_neg) <- lookupSyntaxName negateName
+ ; final_e <- mkNegAppRn e' neg_name
+ ; return (final_e, fv_e `plusFV` fv_neg) }
------------------------------------------
-- Template Haskell extensions
@@ -178,10 +166,10 @@ rnExpr (HsSpliceE is_typed splice) = rnSpliceExpr is_typed splice
rnExpr (HsQuasiQuoteE qq)
- = runQuasiQuoteExpr qq `thenM` \ lexpr' ->
- -- Wrap the result of the quasi-quoter in parens so that we don't
- -- lose the outermost location set by runQuasiQuote (#7918)
- rnExpr (HsPar lexpr')
+ = do { lexpr' <- runQuasiQuoteExpr qq
+ -- Wrap the result of the quasi-quoter in parens so that we don't
+ -- lose the outermost location set by runQuasiQuote (#7918)
+ ; rnExpr (HsPar lexpr') }
---------------------------------------------
-- Sections
@@ -205,33 +193,33 @@ rnExpr expr@(SectionR {})
---------------------------------------------
rnExpr (HsCoreAnn ann expr)
- = rnLExpr expr `thenM` \ (expr', fvs_expr) ->
- return (HsCoreAnn ann expr', fvs_expr)
+ = do { (expr', fvs_expr) <- rnLExpr expr
+ ; return (HsCoreAnn ann expr', fvs_expr) }
rnExpr (HsSCC lbl expr)
- = rnLExpr expr `thenM` \ (expr', fvs_expr) ->
- return (HsSCC lbl expr', fvs_expr)
+ = do { (expr', fvs_expr) <- rnLExpr expr
+ ; return (HsSCC lbl expr', fvs_expr) }
rnExpr (HsTickPragma info expr)
- = rnLExpr expr `thenM` \ (expr', fvs_expr) ->
- return (HsTickPragma info expr', fvs_expr)
+ = do { (expr', fvs_expr) <- rnLExpr expr
+ ; return (HsTickPragma info expr', fvs_expr) }
rnExpr (HsLam matches)
- = rnMatchGroup LambdaExpr rnLExpr matches `thenM` \ (matches', fvMatch) ->
- return (HsLam matches', fvMatch)
+ = do { (matches', fvMatch) <- rnMatchGroup LambdaExpr rnLExpr matches
+ ; return (HsLam matches', fvMatch) }
rnExpr (HsLamCase arg matches)
- = rnMatchGroup CaseAlt rnLExpr matches `thenM` \ (matches', fvs_ms) ->
- return (HsLamCase arg matches', fvs_ms)
+ = do { (matches', fvs_ms) <- rnMatchGroup CaseAlt rnLExpr matches
+ ; return (HsLamCase arg matches', fvs_ms) }
rnExpr (HsCase expr matches)
- = rnLExpr expr `thenM` \ (new_expr, e_fvs) ->
- rnMatchGroup CaseAlt rnLExpr matches `thenM` \ (new_matches, ms_fvs) ->
- return (HsCase new_expr new_matches, e_fvs `plusFV` ms_fvs)
+ = do { (new_expr, e_fvs) <- rnLExpr expr
+ ; (new_matches, ms_fvs) <- rnMatchGroup CaseAlt rnLExpr matches
+ ; return (HsCase new_expr new_matches, e_fvs `plusFV` ms_fvs) }
rnExpr (HsLet binds expr)
- = rnLocalBindsAndThen binds $ \ binds' ->
- rnLExpr expr `thenM` \ (expr',fvExpr) ->
- return (HsLet binds' expr', fvExpr)
+ = rnLocalBindsAndThen binds $ \binds' -> do
+ { (expr',fvExpr) <- rnLExpr expr
+ ; return (HsLet binds' expr', fvExpr) }
rnExpr (HsDo do_or_lc stmts _)
= do { ((stmts', _), fvs) <- rnStmts do_or_lc rnLExpr stmts (\ _ -> return ((), emptyFVs))
@@ -248,8 +236,8 @@ rnExpr (ExplicitList _ _ exps)
return (ExplicitList placeHolderType Nothing exps', fvs) }
rnExpr (ExplicitPArr _ exps)
- = rnExprs exps `thenM` \ (exps', fvs) ->
- return (ExplicitPArr placeHolderType exps', fvs)
+ = do { (exps', fvs) <- rnExprs exps
+ ; return (ExplicitPArr placeHolderType exps', fvs) }
rnExpr (ExplicitTuple tup_args boxity)
= do { checkTupleSection tup_args
@@ -290,8 +278,8 @@ rnExpr (HsMultiIf ty alts)
; return (HsMultiIf ty alts', fvs) }
rnExpr (HsType a)
- = rnLHsType HsTypeCtx a `thenM` \ (t, fvT) ->
- return (HsType t, fvT)
+ = do { (t, fvT) <- rnLHsType HsTypeCtx a
+ ; return (HsType t, fvT) }
rnExpr (ArithSeq _ _ seq)
= do { opt_OverloadedLists <- xoptM Opt_OverloadedLists
@@ -304,8 +292,8 @@ rnExpr (ArithSeq _ _ seq)
return (ArithSeq noPostTcExpr Nothing new_seq, fvs) }
rnExpr (PArrSeq _ seq)
- = rnArithSeq seq `thenM` \ (new_seq, fvs) ->
- return (PArrSeq noPostTcExpr new_seq, fvs)
+ = do { (new_seq, fvs) <- rnArithSeq seq
+ ; return (PArrSeq noPostTcExpr new_seq, fvs) }
\end{code}
These three are pattern syntax appearing in expressions.
@@ -332,9 +320,9 @@ rnExpr e@(ELazyPat {}) = patSynErr e
\begin{code}
rnExpr (HsProc pat body)
= newArrowScope $
- rnPat ProcExpr pat $ \ pat' ->
- rnCmdTop body `thenM` \ (body',fvBody) ->
- return (HsProc pat' body', fvBody)
+ rnPat ProcExpr pat $ \ pat' -> do
+ { (body',fvBody) <- rnCmdTop body
+ ; return (HsProc pat' body', fvBody) }
-- Ideally, these would be done in parsing, but to keep parsing simple, we do it here.
rnExpr e@(HsArrApp {}) = arrowFail e
@@ -402,9 +390,9 @@ rnHsRecBinds ctxt rec_binds@(HsRecFields { rec_dotdot = dd })
rnCmdArgs :: [LHsCmdTop RdrName] -> RnM ([LHsCmdTop Name], FreeVars)
rnCmdArgs [] = return ([], emptyFVs)
rnCmdArgs (arg:args)
- = rnCmdTop arg `thenM` \ (arg',fvArg) ->
- rnCmdArgs args `thenM` \ (args',fvArgs) ->
- return (arg':args', fvArg `plusFV` fvArgs)
+ = do { (arg',fvArg) <- rnCmdTop arg
+ ; (args',fvArgs) <- rnCmdArgs args
+ ; return (arg':args', fvArg `plusFV` fvArgs) }
rnCmdTop :: LHsCmdTop RdrName -> RnM (LHsCmdTop Name, FreeVars)
rnCmdTop = wrapLocFstM rnCmdTop'
@@ -425,10 +413,10 @@ rnLCmd = wrapLocFstM rnCmd
rnCmd :: HsCmd RdrName -> RnM (HsCmd Name, FreeVars)
rnCmd (HsCmdArrApp arrow arg _ ho rtl)
- = select_arrow_scope (rnLExpr arrow) `thenM` \ (arrow',fvArrow) ->
- rnLExpr arg `thenM` \ (arg',fvArg) ->
- return (HsCmdArrApp arrow' arg' placeHolderType ho rtl,
- fvArrow `plusFV` fvArg)
+ = do { (arrow',fvArrow) <- select_arrow_scope (rnLExpr arrow)
+ ; (arg',fvArg) <- rnLExpr arg
+ ; return (HsCmdArrApp arrow' arg' placeHolderType ho rtl,
+ fvArrow `plusFV` fvArg) }
where
select_arrow_scope tc = case ho of
HsHigherOrderApp -> tc
@@ -441,42 +429,37 @@ rnCmd (HsCmdArrApp arrow arg _ ho rtl)
-- infix form
rnCmd (HsCmdArrForm op (Just _) [arg1, arg2])
- = escapeArrowScope (rnLExpr op)
- `thenM` \ (op',fv_op) ->
- let L _ (HsVar op_name) = op' in
- rnCmdTop arg1 `thenM` \ (arg1',fv_arg1) ->
- rnCmdTop arg2 `thenM` \ (arg2',fv_arg2) ->
-
+ = do { (op',fv_op) <- escapeArrowScope (rnLExpr op)
+ ; let L _ (HsVar op_name) = op'
+ ; (arg1',fv_arg1) <- rnCmdTop arg1
+ ; (arg2',fv_arg2) <- rnCmdTop arg2
-- Deal with fixity
-
- lookupFixityRn op_name `thenM` \ fixity ->
- mkOpFormRn arg1' op' fixity arg2' `thenM` \ final_e ->
-
- return (final_e,
- fv_arg1 `plusFV` fv_op `plusFV` fv_arg2)
+ ; fixity <- lookupFixityRn op_name
+ ; final_e <- mkOpFormRn arg1' op' fixity arg2'
+ ; return (final_e, fv_arg1 `plusFV` fv_op `plusFV` fv_arg2) }
rnCmd (HsCmdArrForm op fixity cmds)
- = escapeArrowScope (rnLExpr op) `thenM` \ (op',fvOp) ->
- rnCmdArgs cmds `thenM` \ (cmds',fvCmds) ->
- return (HsCmdArrForm op' fixity cmds', fvOp `plusFV` fvCmds)
+ = do { (op',fvOp) <- escapeArrowScope (rnLExpr op)
+ ; (cmds',fvCmds) <- rnCmdArgs cmds
+ ; return (HsCmdArrForm op' fixity cmds', fvOp `plusFV` fvCmds) }
rnCmd (HsCmdApp fun arg)
- = rnLCmd fun `thenM` \ (fun',fvFun) ->
- rnLExpr arg `thenM` \ (arg',fvArg) ->
- return (HsCmdApp fun' arg', fvFun `plusFV` fvArg)
+ = do { (fun',fvFun) <- rnLCmd fun
+ ; (arg',fvArg) <- rnLExpr arg
+ ; return (HsCmdApp fun' arg', fvFun `plusFV` fvArg) }
rnCmd (HsCmdLam matches)
- = rnMatchGroup LambdaExpr rnLCmd matches `thenM` \ (matches', fvMatch) ->
- return (HsCmdLam matches', fvMatch)
+ = do { (matches', fvMatch) <- rnMatchGroup LambdaExpr rnLCmd matches
+ ; return (HsCmdLam matches', fvMatch) }
rnCmd (HsCmdPar e)
= do { (e', fvs_e) <- rnLCmd e
; return (HsCmdPar e', fvs_e) }
rnCmd (HsCmdCase expr matches)
- = rnLExpr expr `thenM` \ (new_expr, e_fvs) ->
- rnMatchGroup CaseAlt rnLCmd matches `thenM` \ (new_matches, ms_fvs) ->
- return (HsCmdCase new_expr new_matches, e_fvs `plusFV` ms_fvs)
+ = do { (new_expr, e_fvs) <- rnLExpr expr
+ ; (new_matches, ms_fvs) <- rnMatchGroup CaseAlt rnLCmd matches
+ ; return (HsCmdCase new_expr new_matches, e_fvs `plusFV` ms_fvs) }
rnCmd (HsCmdIf _ p b1 b2)
= do { (p', fvP) <- rnLExpr p
@@ -486,9 +469,9 @@ rnCmd (HsCmdIf _ p b1 b2)
; return (HsCmdIf mb_ite p' b1' b2', plusFVs [fvITE, fvP, fvB1, fvB2]) }
rnCmd (HsCmdLet binds cmd)
- = rnLocalBindsAndThen binds $ \ binds' ->
- rnLCmd cmd `thenM` \ (cmd',fvExpr) ->
- return (HsCmdLet binds' cmd', fvExpr)
+ = rnLocalBindsAndThen binds $ \ binds' -> do
+ { (cmd',fvExpr) <- rnLCmd cmd
+ ; return (HsCmdLet binds' cmd', fvExpr) }
rnCmd (HsCmdDo stmts _)
= do { ((stmts', _), fvs) <- rnStmts ArrowExpr rnLCmd stmts (\ _ -> return ((), emptyFVs))
@@ -578,25 +561,25 @@ methodNamesStmt (TransStmt {}) = emptyFVs
\begin{code}
rnArithSeq :: ArithSeqInfo RdrName -> RnM (ArithSeqInfo Name, FreeVars)
rnArithSeq (From expr)
- = rnLExpr expr `thenM` \ (expr', fvExpr) ->
- return (From expr', fvExpr)
+ = do { (expr', fvExpr) <- rnLExpr expr
+ ; return (From expr', fvExpr) }
rnArithSeq (FromThen expr1 expr2)
- = rnLExpr expr1 `thenM` \ (expr1', fvExpr1) ->
- rnLExpr expr2 `thenM` \ (expr2', fvExpr2) ->
- return (FromThen expr1' expr2', fvExpr1 `plusFV` fvExpr2)
+ = do { (expr1', fvExpr1) <- rnLExpr expr1
+ ; (expr2', fvExpr2) <- rnLExpr expr2
+ ; return (FromThen expr1' expr2', fvExpr1 `plusFV` fvExpr2) }
rnArithSeq (FromTo expr1 expr2)
- = rnLExpr expr1 `thenM` \ (expr1', fvExpr1) ->
- rnLExpr expr2 `thenM` \ (expr2', fvExpr2) ->
- return (FromTo expr1' expr2', fvExpr1 `plusFV` fvExpr2)
+ = do { (expr1', fvExpr1) <- rnLExpr expr1
+ ; (expr2', fvExpr2) <- rnLExpr expr2
+ ; return (FromTo expr1' expr2', fvExpr1 `plusFV` fvExpr2) }
rnArithSeq (FromThenTo expr1 expr2 expr3)
- = rnLExpr expr1 `thenM` \ (expr1', fvExpr1) ->
- rnLExpr expr2 `thenM` \ (expr2', fvExpr2) ->
- rnLExpr expr3 `thenM` \ (expr3', fvExpr3) ->
- return (FromThenTo expr1' expr2' expr3',
- plusFVs [fvExpr1, fvExpr2, fvExpr3])
+ = do { (expr1', fvExpr1) <- rnLExpr expr1
+ ; (expr2', fvExpr2) <- rnLExpr expr2
+ ; (expr3', fvExpr3) <- rnLExpr expr3
+ ; return (FromThenTo expr1' expr2' expr3',
+ plusFVs [fvExpr1, fvExpr2, fvExpr3]) }
\end{code}
%************************************************************************
@@ -959,21 +942,19 @@ rn_rec_stmt rnBody _ (L loc (LastStmt body _)) _
L loc (LastStmt body' ret_op))] }
rn_rec_stmt rnBody _ (L loc (BodyStmt body _ _ _)) _
- = rnBody body `thenM` \ (body', fvs) ->
- lookupSyntaxName thenMName `thenM` \ (then_op, fvs1) ->
- return [(emptyNameSet, fvs `plusFV` fvs1, emptyNameSet,
- L loc (BodyStmt body' then_op noSyntaxExpr placeHolderType))]
+ = do { (body', fvs) <- rnBody body
+ ; (then_op, fvs1) <- lookupSyntaxName thenMName
+ ; return [(emptyNameSet, fvs `plusFV` fvs1, emptyNameSet,
+ L loc (BodyStmt body' then_op noSyntaxExpr placeHolderType))] }
rn_rec_stmt rnBody _ (L loc (BindStmt pat' body _ _)) fv_pat
- = rnBody body `thenM` \ (body', fv_expr) ->
- lookupSyntaxName bindMName `thenM` \ (bind_op, fvs1) ->
- lookupSyntaxName failMName `thenM` \ (fail_op, fvs2) ->
- let
- bndrs = mkNameSet (collectPatBinders pat')
- fvs = fv_expr `plusFV` fv_pat `plusFV` fvs1 `plusFV` fvs2
- in
- return [(bndrs, fvs, bndrs `intersectNameSet` fvs,
- L loc (BindStmt pat' body' bind_op fail_op))]
+ = do { (body', fv_expr) <- rnBody body
+ ; (bind_op, fvs1) <- lookupSyntaxName bindMName
+ ; (fail_op, fvs2) <- lookupSyntaxName failMName
+ ; let bndrs = mkNameSet (collectPatBinders pat')
+ fvs = fv_expr `plusFV` fv_pat `plusFV` fvs1 `plusFV` fvs2
+ ; return [(bndrs, fvs, bndrs `intersectNameSet` fvs,
+ L loc (BindStmt pat' body' bind_op fail_op))] }
rn_rec_stmt _ _ (L _ (LetStmt binds@(HsIPBinds _))) _
= failWith (badIpBinds (ptext (sLit "an mdo expression")) binds)
@@ -1003,9 +984,9 @@ rn_rec_stmts :: Outputable (body RdrName) =>
-> [Name]
-> [(LStmtLR Name RdrName (Located (body RdrName)), FreeVars)]
-> RnM [Segment (LStmt Name (Located (body Name)))]
-rn_rec_stmts rnBody bndrs stmts =
- mapM (uncurry (rn_rec_stmt rnBody bndrs)) stmts `thenM` \ segs_s ->
- return (concat segs_s)
+rn_rec_stmts rnBody bndrs stmts
+ = do { segs_s <- mapM (uncurry (rn_rec_stmt rnBody bndrs)) stmts
+ ; return (concat segs_s) }
---------------------------------------------
segmentRecStmts :: HsStmtContext Name
@@ -1245,8 +1226,8 @@ checkStmt :: HsStmtContext Name
checkStmt ctxt (L _ stmt)
= do { dflags <- getDynFlags
; case okStmt dflags ctxt stmt of
- Nothing -> return ()
- Just extra -> addErr (msg $$ extra) }
+ IsValid -> return ()
+ NotValid extra -> addErr (msg $$ extra) }
where
msg = sep [ ptext (sLit "Unexpected") <+> pprStmtCat stmt <+> ptext (sLit "statement")
, ptext (sLit "in") <+> pprAStmtContext ctxt ]
@@ -1261,13 +1242,12 @@ pprStmtCat (RecStmt {}) = ptext (sLit "rec")
pprStmtCat (ParStmt {}) = ptext (sLit "parallel")
------------
-isOK, notOK :: Maybe SDoc
-isOK = Nothing
-notOK = Just empty
+emptyInvalid :: Validity -- Payload is the empty document
+emptyInvalid = NotValid empty
okStmt, okDoStmt, okCompStmt, okParStmt, okPArrStmt
:: DynFlags -> HsStmtContext Name
- -> Stmt RdrName (Located (body RdrName)) -> Maybe SDoc
+ -> Stmt RdrName (Located (body RdrName)) -> Validity
-- Return Nothing if OK, (Just extra) if not ok
-- The "extra" is an SDoc that is appended to an generic error message
@@ -1285,59 +1265,59 @@ okStmt dflags ctxt stmt
TransStmtCtxt ctxt -> okStmt dflags ctxt stmt
-------------
-okPatGuardStmt :: Stmt RdrName (Located (body RdrName)) -> Maybe SDoc
+okPatGuardStmt :: Stmt RdrName (Located (body RdrName)) -> Validity
okPatGuardStmt stmt
= case stmt of
- BodyStmt {} -> isOK
- BindStmt {} -> isOK
- LetStmt {} -> isOK
- _ -> notOK
+ BodyStmt {} -> IsValid
+ BindStmt {} -> IsValid
+ LetStmt {} -> IsValid
+ _ -> emptyInvalid
-------------
okParStmt dflags ctxt stmt
= case stmt of
- LetStmt (HsIPBinds {}) -> notOK
+ LetStmt (HsIPBinds {}) -> emptyInvalid
_ -> okStmt dflags ctxt stmt
----------------
okDoStmt dflags ctxt stmt
= case stmt of
RecStmt {}
- | Opt_RecursiveDo `xopt` dflags -> isOK
- | ArrowExpr <- ctxt -> isOK -- Arrows allows 'rec'
- | otherwise -> Just (ptext (sLit "Use RecursiveDo"))
- BindStmt {} -> isOK
- LetStmt {} -> isOK
- BodyStmt {} -> isOK
- _ -> notOK
+ | Opt_RecursiveDo `xopt` dflags -> IsValid
+ | ArrowExpr <- ctxt -> IsValid -- Arrows allows 'rec'
+ | otherwise -> NotValid (ptext (sLit "Use RecursiveDo"))
+ BindStmt {} -> IsValid
+ LetStmt {} -> IsValid
+ BodyStmt {} -> IsValid
+ _ -> emptyInvalid
----------------
okCompStmt dflags _ stmt
= case stmt of
- BindStmt {} -> isOK
- LetStmt {} -> isOK
- BodyStmt {} -> isOK
+ BindStmt {} -> IsValid
+ LetStmt {} -> IsValid
+ BodyStmt {} -> IsValid
ParStmt {}
- | Opt_ParallelListComp `xopt` dflags -> isOK
- | otherwise -> Just (ptext (sLit "Use ParallelListComp"))
+ | Opt_ParallelListComp `xopt` dflags -> IsValid
+ | otherwise -> NotValid (ptext (sLit "Use ParallelListComp"))
TransStmt {}
- | Opt_TransformListComp `xopt` dflags -> isOK
- | otherwise -> Just (ptext (sLit "Use TransformListComp"))
- RecStmt {} -> notOK
- LastStmt {} -> notOK -- Should not happen (dealt with by checkLastStmt)
+ | Opt_TransformListComp `xopt` dflags -> IsValid
+ | otherwise -> NotValid (ptext (sLit "Use TransformListComp"))
+ RecStmt {} -> emptyInvalid
+ LastStmt {} -> emptyInvalid -- Should not happen (dealt with by checkLastStmt)
----------------
okPArrStmt dflags _ stmt
= case stmt of
- BindStmt {} -> isOK
- LetStmt {} -> isOK
- BodyStmt {} -> isOK
+ BindStmt {} -> IsValid
+ LetStmt {} -> IsValid
+ BodyStmt {} -> IsValid
ParStmt {}
- | Opt_ParallelListComp `xopt` dflags -> isOK
- | otherwise -> Just (ptext (sLit "Use ParallelListComp"))
- TransStmt {} -> notOK
- RecStmt {} -> notOK
- LastStmt {} -> notOK -- Should not happen (dealt with by checkLastStmt)
+ | Opt_ParallelListComp `xopt` dflags -> IsValid
+ | otherwise -> NotValid (ptext (sLit "Use ParallelListComp"))
+ TransStmt {} -> emptyInvalid
+ RecStmt {} -> emptyInvalid
+ LastStmt {} -> emptyInvalid -- Should not happen (dealt with by checkLastStmt)
---------
checkTupleSection :: [HsTupArg RdrName] -> RnM ()
diff --git a/compiler/rename/RnNames.lhs b/compiler/rename/RnNames.lhs
index 56ee969aed2b..5071828e4d87 100644
--- a/compiler/rename/RnNames.lhs
+++ b/compiler/rename/RnNames.lhs
@@ -4,6 +4,8 @@
\section[RnNames]{Extracting imported and top-level names in scope}
\begin{code}
+{-# LANGUAGE CPP, NondecreasingIndentation #-}
+
module RnNames (
rnImports, getLocalNonValBinders,
rnExports, extendGlobalRdrEnvRn,
@@ -257,7 +259,7 @@ rnImportDecl this_mod
imp_mod : dep_finsts deps
| otherwise = dep_finsts deps
- pkg = modulePackageId (mi_module iface)
+ pkg = modulePackageKey (mi_module iface)
-- Does this import mean we now require our own pkg
-- to be trusted? See Note [Trust Own Package]
@@ -572,6 +574,29 @@ the environment, and then process the type instances.
@filterImports@ takes the @ExportEnv@ telling what the imported module makes
available, and filters it through the import spec (if any).
+Note [Dealing with imports]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For import M( ies ), we take the mi_exports of M, and make
+ imp_occ_env :: OccEnv (Name, AvailInfo, Maybe Name)
+One entry for each Name that M exports; the AvailInfo describes just
+that Name.
+
+The situation is made more complicated by associated types. E.g.
+ module M where
+ class C a where { data T a }
+ instance C Int where { data T Int = T1 | T2 }
+ instance C Bool where { data T Int = T3 }
+Then M's export_avails are (recall the AvailTC invariant from Avails.hs)
+ C(C,T), T(T,T1,T2,T3)
+Notice that T appears *twice*, once as a child and once as a parent.
+From this we construct the imp_occ_env
+ C -> (C, C(C,T), Nothing
+ T -> (T, T(T,T1,T2,T3), Just C)
+ T1 -> (T1, T(T1,T2,T3), Nothing) -- similarly T2,T3
+
+Note that the imp_occ_env will have entries for data constructors too,
+although we never look up data constructors.
+
\begin{code}
filterImports :: ModIface
-> ImpDeclSpec -- The span for the entire import decl
@@ -605,34 +630,22 @@ filterImports iface decl_spec (Just (want_hiding, import_items))
where
all_avails = mi_exports iface
- -- This environment is how we map names mentioned in the import
- -- list to the actual Name they correspond to, and the name family
- -- that the Name belongs to (the AvailInfo). The situation is
- -- complicated by associated families, which introduce a three-level
- -- hierachy, where class = grand parent, assoc family = parent, and
- -- data constructors = children. The occ_env entries for associated
- -- families needs to capture all this information; hence, we have the
- -- third component of the environment that gives the class name (=
- -- grand parent) in case of associated families.
- --
- -- This env will have entries for data constructors too,
- -- they won't make any difference because naked entities like T
- -- in an import list map to TcOccs, not VarOccs.
- occ_env :: OccEnv (Name, -- the name
- AvailInfo, -- the export item providing the name
- Maybe Name) -- the parent of associated types
- occ_env = mkOccEnv_C combine [ (nameOccName n, (n, a, Nothing))
- | a <- all_avails, n <- availNames a]
+ -- See Note [Dealing with imports]
+ imp_occ_env :: OccEnv (Name, -- the name
+ AvailInfo, -- the export item providing the name
+ Maybe Name) -- the parent of associated types
+ imp_occ_env = mkOccEnv_C combine [ (nameOccName n, (n, a, Nothing))
+ | a <- all_avails, n <- availNames a]
where
- -- we know that (1) there are at most 2 entries for one name, (2) their
- -- first component is identical, (3) they are for tys/cls, and (4) one
- -- entry has the name in its parent position (the other doesn't)
- combine (name, AvailTC p1 subs1, Nothing)
- (_ , AvailTC p2 subs2, Nothing)
- = let
- (parent, subs) = if p1 == name then (p2, subs1) else (p1, subs2)
- in
- (name, AvailTC name subs, Just parent)
+ -- See example in Note [Dealing with imports]
+ -- 'combine' is only called for associated types which appear twice
+ -- in the all_avails. In the example, we combine
+ -- T(T,T1,T2,T3) and C(C,T) to give (T, T(T,T1,T2,T3), Just C)
+ combine (name1, a1@(AvailTC p1 _), mp1)
+ (name2, a2@(AvailTC p2 _), mp2)
+ = ASSERT( name1 == name2 && isNothing mp1 && isNothing mp2 )
+ if p1 == name1 then (name1, a1, Just p2)
+ else (name1, a2, Just p1)
combine x y = pprPanic "filterImports/combine" (ppr x $$ ppr y)
lookup_name :: RdrName -> IELookupM (Name, AvailInfo, Maybe Name)
@@ -640,7 +653,7 @@ filterImports iface decl_spec (Just (want_hiding, import_items))
| Just succ <- mb_success = return succ
| otherwise = failLookupWith BadImport
where
- mb_success = lookupOccEnv occ_env (rdrNameOcc rdr)
+ mb_success = lookupOccEnv imp_occ_env (rdrNameOcc rdr)
lookup_lie :: LIE RdrName -> TcRn [(LIE Name, AvailInfo)]
lookup_lie (L loc ieRdr)
@@ -677,7 +690,7 @@ filterImports iface decl_spec (Just (want_hiding, import_items))
-- type/class and a data constructor. Moreover, when we import
-- data constructors of an associated family, we need separate
-- AvailInfos for the data constructors and the family (as they have
- -- different parents). See the discussion at occ_env.
+ -- different parents). See Note [Dealing with imports]
lookup_ie :: IE RdrName -> IELookupM ([(IE Name, AvailInfo)], [IELookupWarning])
lookup_ie ie = handle_bad_import $ do
case ie of
@@ -713,11 +726,16 @@ filterImports iface decl_spec (Just (want_hiding, import_items))
-> do nameAvail <- lookup_name tc
return ([mkIEThingAbs nameAvail], [])
- IEThingWith tc ns -> do
- (name, AvailTC _ subnames, mb_parent) <- lookup_name tc
+ IEThingWith rdr_tc rdr_ns -> do
+ (name, AvailTC _ ns, mb_parent) <- lookup_name rdr_tc
-- Look up the children in the sub-names of the parent
- let mb_children = lookupChildren subnames ns
+ let subnames = case ns of -- The tc is first in ns,
+ [] -> [] -- if it is there at all
+ -- See the AvailTC Invariant in Avail.hs
+ (n1:ns1) | n1 == name -> ns1
+ | otherwise -> ns
+ mb_children = lookupChildren subnames rdr_ns
children <- if any isNothing mb_children
then failLookupWith BadImport
@@ -1285,11 +1303,14 @@ type ImportDeclUsage
warnUnusedImportDecls :: TcGblEnv -> RnM ()
warnUnusedImportDecls gbl_env
= do { uses <- readMutVar (tcg_used_rdrnames gbl_env)
- ; let imports = filter explicit_import (tcg_rn_imports gbl_env)
+ ; let user_imports = filterOut (ideclImplicit . unLoc) (tcg_rn_imports gbl_env)
+ -- This whole function deals only with *user* imports
+ -- both for warning about unnecessary ones, and for
+ -- deciding the minimal ones
rdr_env = tcg_rdr_env gbl_env
; let usage :: [ImportDeclUsage]
- usage = findImportUsage imports rdr_env (Set.elems uses)
+ usage = findImportUsage user_imports rdr_env (Set.elems uses)
; traceRn (vcat [ ptext (sLit "Uses:") <+> ppr (Set.elems uses)
, ptext (sLit "Import usage") <+> ppr usage])
@@ -1298,10 +1319,6 @@ warnUnusedImportDecls gbl_env
; whenGOptM Opt_D_dump_minimal_imports $
printMinimalImports usage }
- where
- explicit_import (L _ decl) = not (ideclImplicit decl)
- -- Filter out the implicit Prelude import
- -- which we do not want to bleat about
\end{code}
@@ -1417,6 +1434,11 @@ warnUnusedImport :: ImportDeclUsage -> RnM ()
warnUnusedImport (L loc decl, used, unused)
| Just (False,[]) <- ideclHiding decl
= return () -- Do not warn for 'import M()'
+
+ | Just (True, hides) <- ideclHiding decl
+ , not (null hides)
+ , pRELUDE_NAME == unLoc (ideclName decl)
+ = return () -- Note [Do not warn about Prelude hiding]
| null used = addWarnAt loc msg1 -- Nothing used; drop entire decl
| null unused = return () -- Everything imported is used; nop
| otherwise = addWarnAt loc msg2 -- Some imports are unused
@@ -1436,6 +1458,19 @@ warnUnusedImport (L loc decl, used, unused)
pp_not_used = text "is redundant"
\end{code}
+Note [Do not warn about Prelude hiding]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We do not warn about
+ import Prelude hiding( x, y )
+because even if nothing else from Prelude is used, it may be essential to hide
+x,y to avoid name-shadowing warnings. Example (Trac #9061)
+ import Prelude hiding( log )
+ f x = log where log = ()
+
+
+
+Note [Printing minimal imports]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To print the minimal imports we walk over the user-supplied import
decls, and simply trim their import lists. NB that
@@ -1446,6 +1481,7 @@ decls, and simply trim their import lists. NB that
\begin{code}
printMinimalImports :: [ImportDeclUsage] -> RnM ()
+-- See Note [Printing minimal imports]
printMinimalImports imports_w_usage
= do { imports' <- mapM mk_minimal imports_w_usage
; this_mod <- getModule
diff --git a/compiler/rename/RnPat.lhs b/compiler/rename/RnPat.lhs
index 3fde563cd346..48fffce3740c 100644
--- a/compiler/rename/RnPat.lhs
+++ b/compiler/rename/RnPat.lhs
@@ -10,19 +10,15 @@ general, all of these functions return a renamed thing, and a set of
free variables.
\begin{code}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
+{-# LANGUAGE CPP, RankNTypes, ScopedTypeVariables #-}
-{-# LANGUAGE ScopedTypeVariables #-}
module RnPat (-- main entry points
rnPat, rnPats, rnBindPat, rnPatAndThen,
NameMaker, applyNameMaker, -- a utility for making names:
localRecNameMaker, topRecNameMaker, -- sometimes we want to make local names,
-- sometimes we want to make top (qualified) names.
+ isTopRecNameMaker,
rnHsRecFields1, HsRecFieldContext(..),
@@ -193,6 +189,10 @@ data NameMaker
topRecNameMaker :: MiniFixityEnv -> NameMaker
topRecNameMaker fix_env = LetMk TopLevel fix_env
+isTopRecNameMaker :: NameMaker -> Bool
+isTopRecNameMaker (LetMk TopLevel _) = True
+isTopRecNameMaker _ = False
+
localRecNameMaker :: MiniFixityEnv -> NameMaker
localRecNameMaker fix_env = LetMk NotTopLevel fix_env
@@ -434,7 +434,7 @@ rnPatAndThen mk (PArrPat pats _)
rnPatAndThen mk (TuplePat pats boxed _)
= do { liftCps $ checkTupSize (length pats)
; pats' <- rnLPatsAndThen mk pats
- ; return (TuplePat pats' boxed placeHolderType) }
+ ; return (TuplePat pats' boxed []) }
rnPatAndThen _ (SplicePat splice)
= do { -- XXX How to deal with free variables?
diff --git a/compiler/rename/RnSource.lhs b/compiler/rename/RnSource.lhs
index c726d554fcb7..a3bd38a3ec35 100644
--- a/compiler/rename/RnSource.lhs
+++ b/compiler/rename/RnSource.lhs
@@ -4,6 +4,8 @@
\section[RnSource]{Main pass of renamer}
\begin{code}
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
+
module RnSource (
rnSrcDecls, addTcgDUs, rnTyClDecls, findSplice
) where
@@ -35,7 +37,7 @@ import NameEnv
import Avail
import Outputable
import Bag
-import BasicTypes ( RuleName, Origin(..) )
+import BasicTypes ( RuleName )
import FastString
import SrcLoc
import DynFlags
@@ -382,8 +384,8 @@ rnHsForeignDecl (ForeignImport name ty _ spec)
; (ty', fvs) <- rnLHsType (ForeignDeclCtx name) ty
-- Mark any PackageTarget style imports as coming from the current package
- ; let packageId = thisPackage $ hsc_dflags topEnv
- spec' = patchForeignImport packageId spec
+ ; let packageKey = thisPackage $ hsc_dflags topEnv
+ spec' = patchForeignImport packageKey spec
; return (ForeignImport name' ty' noForeignImportCoercionYet spec', fvs) }
@@ -400,20 +402,20 @@ rnHsForeignDecl (ForeignExport name ty _ spec)
-- package, so if they get inlined across a package boundry we'll still
-- know where they're from.
--
-patchForeignImport :: PackageId -> ForeignImport -> ForeignImport
-patchForeignImport packageId (CImport cconv safety fs spec)
- = CImport cconv safety fs (patchCImportSpec packageId spec)
+patchForeignImport :: PackageKey -> ForeignImport -> ForeignImport
+patchForeignImport packageKey (CImport cconv safety fs spec)
+ = CImport cconv safety fs (patchCImportSpec packageKey spec)
-patchCImportSpec :: PackageId -> CImportSpec -> CImportSpec
-patchCImportSpec packageId spec
+patchCImportSpec :: PackageKey -> CImportSpec -> CImportSpec
+patchCImportSpec packageKey spec
= case spec of
- CFunction callTarget -> CFunction $ patchCCallTarget packageId callTarget
+ CFunction callTarget -> CFunction $ patchCCallTarget packageKey callTarget
_ -> spec
-patchCCallTarget :: PackageId -> CCallTarget -> CCallTarget
-patchCCallTarget packageId callTarget =
+patchCCallTarget :: PackageKey -> CCallTarget -> CCallTarget
+patchCCallTarget packageKey callTarget =
case callTarget of
- StaticTarget label Nothing isFun -> StaticTarget label (Just packageId) isFun
+ StaticTarget label Nothing isFun -> StaticTarget label (Just packageKey) isFun
_ -> callTarget
@@ -443,12 +445,14 @@ rnSrcInstDecl (ClsInstD { cid_inst = cid })
rnClsInstDecl :: ClsInstDecl RdrName -> RnM (ClsInstDecl Name, FreeVars)
rnClsInstDecl (ClsInstDecl { cid_poly_ty = inst_ty, cid_binds = mbinds
, cid_sigs = uprags, cid_tyfam_insts = ats
+ , cid_overlap_mode = oflag
, cid_datafam_insts = adts })
-- Used for both source and interface file decls
= do { (inst_ty', inst_fvs) <- rnLHsInstType (text "In an instance declaration") inst_ty
; case splitLHsInstDeclTy_maybe inst_ty' of {
Nothing -> return (ClsInstDecl { cid_poly_ty = inst_ty', cid_binds = emptyLHsBinds
, cid_sigs = [], cid_tyfam_insts = []
+ , cid_overlap_mode = oflag
, cid_datafam_insts = [] }
, inst_fvs) ;
Just (inst_tyvars, _, L _ cls,_) ->
@@ -461,7 +465,7 @@ rnClsInstDecl (ClsInstDecl { cid_poly_ty = inst_ty, cid_binds = mbinds
; traceRn (text "rnSrcInstDecl" <+> ppr inst_ty' $$ ppr inst_tyvars $$ ppr ktv_names)
; ((ats', adts', other_sigs'), more_fvs)
<- extendTyVarEnvFVRn ktv_names $
- do { (ats', at_fvs) <- rnATInstDecls rnTyFamInstDecl cls inst_tyvars ats
+ do { (ats', at_fvs) <- rnATInstDecls rnTyFamInstDecl cls inst_tyvars ats
; (adts', adt_fvs) <- rnATInstDecls rnDataFamInstDecl cls inst_tyvars adts
; (other_sigs', sig_fvs) <- renameSigs (InstDeclCtxt cls) other_sigs
; return ( (ats', adts', other_sigs')
@@ -491,6 +495,7 @@ rnClsInstDecl (ClsInstDecl { cid_poly_ty = inst_ty, cid_binds = mbinds
`plusFV` inst_fvs
; return (ClsInstDecl { cid_poly_ty = inst_ty', cid_binds = mbinds'
, cid_sigs = uprags', cid_tyfam_insts = ats'
+ , cid_overlap_mode = oflag
, cid_datafam_insts = adts' },
all_fvs) } } }
-- We return the renamed associated data type declarations so
@@ -559,14 +564,29 @@ rnTyFamInstDecl mb_cls (TyFamInstDecl { tfid_eqn = L loc eqn })
rnTyFamInstEqn :: Maybe (Name, [Name])
-> TyFamInstEqn RdrName
-> RnM (TyFamInstEqn Name, FreeVars)
-rnTyFamInstEqn mb_cls (TyFamInstEqn { tfie_tycon = tycon
- , tfie_pats = HsWB { hswb_cts = pats }
- , tfie_rhs = rhs })
+rnTyFamInstEqn mb_cls (TyFamEqn { tfe_tycon = tycon
+ , tfe_pats = HsWB { hswb_cts = pats }
+ , tfe_rhs = rhs })
= do { (tycon', pats', rhs', fvs) <-
rnFamInstDecl (TySynCtx tycon) mb_cls tycon pats rhs rnTySyn
- ; return (TyFamInstEqn { tfie_tycon = tycon'
- , tfie_pats = pats'
- , tfie_rhs = rhs' }, fvs) }
+ ; return (TyFamEqn { tfe_tycon = tycon'
+ , tfe_pats = pats'
+ , tfe_rhs = rhs' }, fvs) }
+
+rnTyFamDefltEqn :: Name
+ -> TyFamDefltEqn RdrName
+ -> RnM (TyFamDefltEqn Name, FreeVars)
+rnTyFamDefltEqn cls (TyFamEqn { tfe_tycon = tycon
+ , tfe_pats = tyvars
+ , tfe_rhs = rhs })
+ = bindHsTyVars ctx (Just cls) [] tyvars $ \ tyvars' ->
+ do { tycon' <- lookupFamInstName (Just cls) tycon
+ ; (rhs', fvs) <- rnLHsType ctx rhs
+ ; return (TyFamEqn { tfe_tycon = tycon'
+ , tfe_pats = tyvars'
+ , tfe_rhs = rhs' }, fvs) }
+ where
+ ctx = TyFamilyCtx tycon
rnDataFamInstDecl :: Maybe (Name, [Name])
-> DataFamInstDecl RdrName
@@ -585,7 +605,7 @@ rnDataFamInstDecl mb_cls (DataFamInstDecl { dfid_tycon = tycon
Renaming of the associated types in instances.
\begin{code}
--- rename associated type family decl in class
+-- Rename associated type family decl in class
rnATDecls :: Name -- Class
-> [LFamilyDecl RdrName]
-> RnM ([LFamilyDecl Name], FreeVars)
@@ -635,11 +655,11 @@ extendTyVarEnvForMethodBinds ktv_names thing_inside
\begin{code}
rnSrcDerivDecl :: DerivDecl RdrName -> RnM (DerivDecl Name, FreeVars)
-rnSrcDerivDecl (DerivDecl ty)
+rnSrcDerivDecl (DerivDecl ty overlap)
= do { standalone_deriv_ok <- xoptM Opt_StandaloneDeriving
; unless standalone_deriv_ok (addErr standaloneDerivErr)
; (ty', fvs) <- rnLHsInstType (text "In a deriving declaration") ty
- ; return (DerivDecl ty', fvs) }
+ ; return (DerivDecl ty' overlap, fvs) }
standaloneDerivErr :: SDoc
standaloneDerivErr
@@ -863,10 +883,10 @@ packages, it is safe not to add the dependencies on the .hs-boot stuff to B2.
See also Note [Grouping of type and class declarations] in TcTyClsDecls.
\begin{code}
-isInPackage :: PackageId -> Name -> Bool
+isInPackage :: PackageKey -> Name -> Bool
isInPackage pkgId nm = case nameModule_maybe nm of
Nothing -> False
- Just m -> pkgId == modulePackageId m
+ Just m -> pkgId == modulePackageKey m
-- We use nameModule_maybe because we might be in a TH splice, in which case
-- there is no module name. In that case we cannot have mutual dependencies,
-- so it's fine to return False here.
@@ -936,7 +956,7 @@ rnTyClDecl (SynDecl { tcdLName = tycon, tcdTyVars = tyvars, tcdRhs = rhs })
do { (rhs', fvs) <- rnTySyn doc rhs
; return ((tyvars', rhs'), fvs) }
; return (SynDecl { tcdLName = tycon', tcdTyVars = tyvars'
- , tcdRhs = rhs', tcdFVs = fvs }, fvs) }
+ , tcdRhs = rhs', tcdFVs = fvs }, fvs) }
-- "data", "newtype" declarations
-- both top level and (for an associated type) in an instance decl
@@ -961,20 +981,20 @@ rnTyClDecl (ClassDecl {tcdCtxt = context, tcdLName = lcls,
-- kind signatures on the tyvars
-- Tyvars scope over superclass context and method signatures
- ; ((tyvars', context', fds', ats', at_defs', sigs'), stuff_fvs)
+ ; ((tyvars', context', fds', ats', sigs'), stuff_fvs)
<- bindHsTyVars cls_doc Nothing kvs tyvars $ \ tyvars' -> do
-- Checks for distinct tyvars
{ (context', cxt_fvs) <- rnContext cls_doc context
- ; fds' <- rnFds (docOfHsDocContext cls_doc) fds
+ ; fds' <- rnFds fds
-- The fundeps have no free variables
; (ats', fv_ats) <- rnATDecls cls' ats
- ; (at_defs', fv_at_defs) <- rnATInstDecls rnTyFamInstDecl cls' tyvars' at_defs
; (sigs', sig_fvs) <- renameSigs (ClsDeclCtxt cls') sigs
; let fvs = cxt_fvs `plusFV`
sig_fvs `plusFV`
- fv_ats `plusFV`
- fv_at_defs
- ; return ((tyvars', context', fds', ats', at_defs', sigs'), fvs) }
+ fv_ats
+ ; return ((tyvars', context', fds', ats', sigs'), fvs) }
+
+ ; (at_defs', fv_at_defs) <- rnList (rnTyFamDefltEqn cls') at_defs
-- No need to check for duplicate associated type decls
-- since that is done by RnNames.extendGlobalRdrEnvRn
@@ -1006,7 +1026,7 @@ rnTyClDecl (ClassDecl {tcdCtxt = context, tcdLName = lcls,
-- Haddock docs
; docs' <- mapM (wrapLocM rnDocDecl) docs
- ; let all_fvs = meth_fvs `plusFV` stuff_fvs
+ ; let all_fvs = meth_fvs `plusFV` stuff_fvs `plusFV` fv_at_defs
; return (ClassDecl { tcdCtxt = context', tcdLName = lcls',
tcdTyVars = tyvars', tcdFDs = fds', tcdSigs = sigs',
tcdMeths = mbinds', tcdATs = ats', tcdATDefs = at_defs',
@@ -1404,21 +1424,20 @@ extendRecordFieldEnv tycl_decls inst_decls
%*********************************************************
\begin{code}
-rnFds :: SDoc -> [Located (FunDep RdrName)] -> RnM [Located (FunDep Name)]
-
-rnFds doc fds
+rnFds :: [Located (FunDep RdrName)] -> RnM [Located (FunDep Name)]
+rnFds fds
= mapM (wrapLocM rn_fds) fds
where
rn_fds (tys1, tys2)
- = do { tys1' <- rnHsTyVars doc tys1
- ; tys2' <- rnHsTyVars doc tys2
+ = do { tys1' <- rnHsTyVars tys1
+ ; tys2' <- rnHsTyVars tys2
; return (tys1', tys2') }
-rnHsTyVars :: SDoc -> [RdrName] -> RnM [Name]
-rnHsTyVars doc tvs = mapM (rnHsTyVar doc) tvs
+rnHsTyVars :: [RdrName] -> RnM [Name]
+rnHsTyVars tvs = mapM rnHsTyVar tvs
-rnHsTyVar :: SDoc -> RdrName -> RnM Name
-rnHsTyVar _doc tyvar = lookupOccRn tyvar
+rnHsTyVar :: RdrName -> RnM Name
+rnHsTyVar tyvar = lookupOccRn tyvar
\end{code}
@@ -1518,7 +1537,7 @@ add_role_annot d (tycls@(TyClGroup { group_roles = roles }) : rest)
= tycls { group_roles = d : roles } : rest
add_bind :: LHsBind a -> HsValBinds a -> HsValBinds a
-add_bind b (ValBindsIn bs sigs) = ValBindsIn (bs `snocBag` (FromSource, b)) sigs
+add_bind b (ValBindsIn bs sigs) = ValBindsIn (bs `snocBag` b) sigs
add_bind _ (ValBindsOut {}) = panic "RdrHsSyn:add_bind"
add_sig :: LSig a -> HsValBinds a -> HsValBinds a
diff --git a/compiler/rename/RnSplice.lhs b/compiler/rename/RnSplice.lhs
index e0614d42488f..3c0c145e6b85 100644
--- a/compiler/rename/RnSplice.lhs
+++ b/compiler/rename/RnSplice.lhs
@@ -1,4 +1,6 @@
\begin{code}
+{-# LANGUAGE CPP #-}
+
module RnSplice (
rnTopSpliceDecls,
rnSpliceType, rnSpliceExpr, rnSplicePat, rnSpliceDecl,
diff --git a/compiler/rename/RnTypes.lhs b/compiler/rename/RnTypes.lhs
index 23c54c3bed9b..2f9bfdd65381 100644
--- a/compiler/rename/RnTypes.lhs
+++ b/compiler/rename/RnTypes.lhs
@@ -4,6 +4,8 @@
\section[RnSource]{Main pass of renamer}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module RnTypes (
-- Type related stuff
rnHsType, rnLHsType, rnLHsTypes, rnContext,
@@ -360,8 +362,9 @@ bindHsTyVars doc mb_assoc kv_bndrs tv_bndrs thing_inside
kvs_from_tv_bndrs = [ kv | L _ (KindedTyVar _ kind) <- tvs
, let (_, kvs) = extractHsTyRdrTyVars kind
, kv <- kvs ]
- all_kvs = filterOut (`elemLocalRdrEnv` rdr_env) $
- nub (kv_bndrs ++ kvs_from_tv_bndrs)
+ all_kvs' = nub (kv_bndrs ++ kvs_from_tv_bndrs)
+ all_kvs = filterOut (`elemLocalRdrEnv` rdr_env) all_kvs'
+
overlap_kvs = [ kv | kv <- all_kvs, any ((==) kv . hsLTyVarName) tvs ]
-- These variables appear both as kind and type variables
-- in the same declaration; eg type family T (x :: *) (y :: x)
@@ -395,8 +398,12 @@ bindHsTyVars doc mb_assoc kv_bndrs tv_bndrs thing_inside
; (tv_bndrs', fvs1) <- mapFvRn rn_tv_bndr tvs
; (res, fvs2) <- bindLocalNamesFV (map hsLTyVarName tv_bndrs') $
- do { env <- getLocalRdrEnv
- ; traceRn (text "bhtv" <+> (ppr tvs $$ ppr all_kvs $$ ppr env))
+ do { inner_rdr_env <- getLocalRdrEnv
+ ; traceRn (text "bhtv" <+> vcat
+ [ ppr tvs, ppr kv_bndrs, ppr kvs_from_tv_bndrs
+ , ppr $ map (`elemLocalRdrEnv` rdr_env) all_kvs'
+ , ppr $ map (getUnique . rdrNameOcc) all_kvs'
+ , ppr all_kvs, ppr rdr_env, ppr inner_rdr_env ])
; thing_inside (HsQTvs { hsq_tvs = tv_bndrs', hsq_kvs = kv_names }) }
; return (res, fvs1 `plusFV` fvs2) } }
diff --git a/compiler/simplCore/CSE.lhs b/compiler/simplCore/CSE.lhs
index 691f883d02a2..5330029063a5 100644
--- a/compiler/simplCore/CSE.lhs
+++ b/compiler/simplCore/CSE.lhs
@@ -4,6 +4,8 @@
\section{Common subexpression}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module CSE (cseProgram) where
#include "HsVersions.h"
@@ -12,7 +14,8 @@ import CoreSubst
import Var ( Var )
import Id ( Id, idType, idInlineActivation, zapIdOccInfo )
import CoreUtils ( mkAltExpr
- , exprIsTrivial)
+ , exprIsTrivial
+ , stripTicks, mkTick)
import Type ( tyConAppArgs )
import CoreSyn
import Outputable
@@ -185,9 +188,9 @@ cseBind env (Rec pairs)
cseRhs :: CSEnv -> (OutBndr, InExpr) -> (CSEnv, OutExpr)
cseRhs env (id',rhs)
- = case lookupCSEnv env rhs' of
+ = case lookupCSEnv env rhs'' of
Nothing -> (extendCSEnv env rhs' id', rhs')
- Just id -> (extendCSSubst env id' id, Var id)
+ Just id -> (extendCSSubst env id' id, foldr mkTick (Var id) ticks)
-- In the Just case, we have
-- x = rhs
-- ...
@@ -200,14 +203,17 @@ cseRhs env (id',rhs)
rhs' | isAlwaysActive (idInlineActivation id') = cseExpr env rhs
| otherwise = rhs
-- See Note [CSE for INLINE and NOINLINE]
+ (ticks, rhs'') = stripTicks tickishFloatable rhs'
+
tryForCSE :: CSEnv -> InExpr -> OutExpr
tryForCSE env expr
- | exprIsTrivial expr' = expr' -- No point
- | Just smaller <- lookupCSEnv env expr' = Var smaller
- | otherwise = expr'
+ | exprIsTrivial expr' = expr' -- No point
+ | Just smaller <- lookupCSEnv env expr'' = foldr mkTick (Var smaller) ticks
+ | otherwise = expr'
where
expr' = cseExpr env expr
+ (ticks, expr'') = stripTicks tickishFloatable expr'
cseExpr :: CSEnv -> InExpr -> OutExpr
cseExpr env (Type t) = Type (substTy (csEnvSubst env) t)
@@ -293,10 +299,15 @@ lookupCSEnv (CS { cs_map = csmap }) expr
= case lookupCoreMap csmap expr of
Just (_,e) -> Just e
Nothing -> Nothing
+ -- We don't want to lose the source notes when a common sub
+ -- expression gets eliminated. Hence we push all (!) of them on
+ -- top of the replaced sub-expression. This is probably not too
+ -- useful in practice, but upholds our semantics.
extendCSEnv :: CSEnv -> OutExpr -> Id -> CSEnv
extendCSEnv cse expr id
- = cse { cs_map = extendCoreMap (cs_map cse) expr (expr,id) }
+ = cse { cs_map = extendCoreMap (cs_map cse) sexpr (sexpr,id) }
+ where (_, sexpr) = stripTicks tickishFloatable expr
csEnvSubst :: CSEnv -> Subst
csEnvSubst = cs_subst
diff --git a/compiler/simplCore/CallArity.hs b/compiler/simplCore/CallArity.hs
new file mode 100644
index 000000000000..9dcb61639778
--- /dev/null
+++ b/compiler/simplCore/CallArity.hs
@@ -0,0 +1,667 @@
+--
+-- Copyright (c) 2014 Joachim Breitner
+--
+
+module CallArity
+ ( callArityAnalProgram
+ , callArityRHS -- for testing
+ ) where
+
+import VarSet
+import VarEnv
+import DynFlags ( DynFlags )
+
+import BasicTypes
+import CoreSyn
+import Id
+import CoreArity ( typeArity )
+import CoreUtils ( exprIsHNF )
+--import Outputable
+import UnVarGraph
+
+import Control.Arrow ( first, second )
+
+
+{-
+%************************************************************************
+%* *
+ Call Arity Analysis
+%* *
+%************************************************************************
+
+Note [Call Arity: The goal]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The goal of this analysis is to find out if we can eta-expand a local function,
+based on how it is being called. The motivating example is code like this,
+which comes up when we implement foldl using foldr, and do list fusion:
+
+ let go = \x -> let d = case ... of
+ False -> go (x+1)
+ True -> id
+ in \z -> d (x + z)
+ in go 1 0
+
+If we do not eta-expand `go` to have arity 2, we are going to allocate a lot of
+partial function applications, which would be bad.
+
+The function `go` has a type of arity two, but only one lambda is manifest.
+Furthermore, an analysis that only looks at the RHS of go cannot be sufficient
+to eta-expand go: If `go` is ever called with one argument (and the result used
+multiple times), we would be doing the work in `...` multiple times.
+
+So `callArityAnalProgram` looks at the whole let expression to figure out if
+all calls are nice, i.e. have a high enough arity. It then stores the result in
+the `calledArity` field of the `IdInfo` of `go`, which the next simplifier
+phase will eta-expand.
+
+The specification of the `calledArity` field is:
+
+ No work will be lost if you eta-expand me to the arity in `calledArity`.
+
+What we want to know for a variable
+-----------------------------------
+
+For every let-bound variable we'd like to know:
+ 1. A lower bound on the arity of all calls to the variable, and
+ 2. whether the variable is being called at most once or possibly multiple
+ times.
+
+It is always ok to lower the arity, or pretend that there are multiple calls.
+In particular, "Minimum arity 0 and possible called multiple times" is always
+correct.
+
+
+What we want to know from an expression
+---------------------------------------
+
+In order to obtain that information for variables, we analyse expressions and
+obtain bits of information:
+
+ I. The arity analysis:
+ For every variable, whether it is absent, or called,
+ and if called, which what arity.
+
+ II. The Co-Called analysis:
+ For every two variables, whether there is a possibility that both are being
+ called.
+ We obtain as a special case: For every variables, whether there is a
+ possibility that it is being called twice.
+
+For efficiency reasons, we gather this information only for a set of
+*interesting variables*, to avoid spending time on, e.g., variables from pattern matches.
+
+The two analyses are not completely independent, as a higher arity can improve
+the information about what variables are being called once or multiple times.
+
+Note [Analysis I: The arity analyis]
+------------------------------------
+
+The arity analysis is quite straight forward: The information about an
+expression is an
+ VarEnv Arity
+where absent variables are bound to Nothing and otherwise to a lower bound to
+their arity.
+
+When we analyse an expression, we analyse it with a given context arity.
+Lambdas decrease and applications increase the incoming arity. Analysing a
+variable will put that arity in the environment. In lets or cases all the
+results from the various subexpressions are lubed, which takes the point-wise
+minimum (considering Nothing an infinity).
+
+
+Note [Analysis II: The Co-Called analysis]
+------------------------------------------
+
+The second part is more sophisticated. For reasons explained below, it is not
+sufficient to simply know how often an expression evaluates a variable. Instead
+we need to know which variables are possibly called together.
+
+The data structure here is an undirected graph of variables, which is provided
+by the abstract
+ UnVarGraph
+
+It is safe to return a larger graph, i.e. one with more edges. The worst case
+(i.e. the least useful and always correct result) is the complete graph on all
+free variables, which means that anything can be called together with anything
+(including itself).
+
+Notation for the following:
+C(e) is the co-called result for e.
+G₁∪G₂ is the union of two graphs
+fv is the set of free variables (conveniently the domain of the arity analysis result)
+S₁×S₂ is the complete bipartite graph { {a,b} | a ∈ S₁, b ∈ S₂ }
+S² is the complete graph on the set of variables S, S² = S×S
+C'(e) is a variant for bound expression:
+ If e is called at most once, or it is and stays a thunk (after the analysis),
+ it is simply C(e). Otherwise, the expression can be called multiple times
+ and we return (fv e)²
+
+The interesting cases of the analysis:
+ * Var v:
+ No other variables are being called.
+ Return {} (the empty graph)
+ * Lambda v e, under arity 0:
+ This means that e can be evaluated many times and we cannot get
+ any useful co-call information.
+ Return (fv e)²
+ * Case alternatives alt₁,alt₂,...:
+ Only one can be executed, so
+ Return (alt₁ ∪ alt₂ ∪...)
+ * App e₁ e₂ (and analogously Case scrut alts):
+ We get the results from both sides. Additionally, anything called by e₁ can
+ possibly be called with anything from e₂.
+ Return: C(e₁) ∪ C(e₂) ∪ (fv e₁) × (fv e₂)
+ * Let v = rhs in body:
+ In addition to the results from the subexpressions, add all co-calls from
+ everything that the body calls together with v to everything that is called
+ by v.
+ Return: C'(rhs) ∪ C(body) ∪ (fv rhs) × {v'| {v,v'} ∈ C(body)}
+ * Letrec v₁ = rhs₁ ... vₙ = rhsₙ in body
+ Tricky.
+ We assume that it is really mutually recursive, i.e. that every variable
+ calls one of the others, and that this is strongly connected (otherwise we
+ return an over-approximation, so that's ok), see note [Recursion and fixpointing].
+
+ Let V = {v₁,...vₙ}.
+ Assume that the vs have been analysed with an incoming demand and
+ cardinality consistent with the final result (this is the fixed-pointing).
+ Again we can use the results from all subexpressions.
+ In addition, for every variable vᵢ, we need to find out what it is called
+ with (call this set Sᵢ). There are two cases:
+ * If vᵢ is a function, we need to go through all right-hand-sides and bodies,
+ and collect every variable that is called together with any variable from V:
+ Sᵢ = {v' | j ∈ {1,...,n}, {v',vⱼ} ∈ C'(rhs₁) ∪ ... ∪ C'(rhsₙ) ∪ C(body) }
+ * If vᵢ is a thunk, then its rhs is evaluated only once, so we need to
+ exclude it from this set:
+ Sᵢ = {v' | j ∈ {1,...,n}, j≠i, {v',vⱼ} ∈ C'(rhs₁) ∪ ... ∪ C'(rhsₙ) ∪ C(body) }
+ Finally, combine all this:
+ Return: C(body) ∪
+ C'(rhs₁) ∪ ... ∪ C'(rhsₙ) ∪
+ (fv rhs₁) × S₁) ∪ ... ∪ (fv rhsₙ) × Sₙ)
+
+Using the result: Eta-Expansion
+-------------------------------
+
+We use the result of these two analyses to decide whether we can eta-expand the
+rhs of a let-bound variable.
+
+If the variable is already a function (exprIsHNF), and all calls to the
+variables have a higher arity than the current manifest arity (i.e. the number
+of lambdas), expand.
+
+If the variable is a thunk we must be careful: Eta-Expansion will prevent
+sharing of work, so this is only safe if there is at most one call to the
+function. Therefore, we check whether {v,v} ∈ G.
+
+ Example:
+
+ let n = case .. of .. -- A thunk!
+ in n 0 + n 1
+
+ vs.
+
+ let n = case .. of ..
+ in case .. of T -> n 0
+ F -> n 1
+
+ We are only allowed to eta-expand `n` if it is going to be called at most
+ once in the body of the outer let. So we need to know, for each variable
+ individually, that it is going to be called at most once.
+
+
+Why the co-call graph?
+----------------------
+
+Why is it not sufficient to simply remember which variables are called once and
+which are called multiple times? It would be in the previous example, but consider
+
+ let n = case .. of ..
+ in case .. of
+ True -> let go = \y -> case .. of
+ True -> go (y + n 1)
+ False > n
+ in go 1
+ False -> n
+
+vs.
+
+ let n = case .. of ..
+ in case .. of
+ True -> let go = \y -> case .. of
+ True -> go (y+1)
+ False > n
+ in go 1
+ False -> n
+
+In both cases, the body and the rhs of the inner let call n at most once.
+But only in the second case that holds for the whole expression! The
+crucial difference is that in the first case, the rhs of `go` can call
+*both* `go` and `n`, and hence can call `n` multiple times as it recurses,
+while in the second case find out that `go` and `n` are not called together.
+
+
+Why co-call information for functions?
+--------------------------------------
+
+Although for eta-expansion we need the information only for thunks, we still
+need to know whether functions are being called once or multiple times, and
+together with what other functions.
+
+ Example:
+
+ let n = case .. of ..
+ f x = n (x+1)
+ in f 1 + f 2
+
+ vs.
+
+ let n = case .. of ..
+ f x = n (x+1)
+ in case .. of T -> f 0
+ F -> f 1
+
+ Here, the body of f calls n exactly once, but f itself is being called
+ multiple times, so eta-expansion is not allowed.
+
+
+Note [Analysis type signature]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The work-horse of the analysis is the function `callArityAnal`, with the
+following type:
+
+ type CallArityRes = (UnVarGraph, VarEnv Arity)
+ callArityAnal ::
+ Arity -> -- The arity this expression is called with
+ VarSet -> -- The set of interesting variables
+ CoreExpr -> -- The expression to analyse
+ (CallArityRes, CoreExpr)
+
+and the following specification:
+
+ ((coCalls, callArityEnv), expr') = callArityAnal arity interestingIds expr
+
+ <=>
+
+ Assume the expression `expr` is being passed `arity` arguments. Then it holds that
+ * The domain of `callArityEnv` is a subset of `interestingIds`.
+ * Any variable from `interestingIds` that is not mentioned in the `callArityEnv`
+ is absent, i.e. not called at all.
+ * Every call from `expr` to a variable bound to n in `callArityEnv` has at
+ least n value arguments.
+ * For two interesting variables `v1` and `v2`, if they are not adjacent in `coCalls`,
+ then in no execution of `expr` both are being called.
+ Furthermore, expr' is expr with the callArity field of the `IdInfo` updated.
+
+
+Note [Which variables are interesting]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The analysis would quickly become prohibitively expensive if we were to analyse all
+variables; for most variables we simply do not care about how often they are
+called, i.e. variables bound in a pattern match. So interesting are variables that are
+ * top-level or let bound
+ * and possibly functions (typeArity > 0)
+
+Note [Information about boring variables]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If we decide that the variable bound in `let x = e1 in e2` is not interesting,
+the analysis of `e2` will not report anything about `x`. To ensure that
+`callArityBind` does still do the right thing we have to extend the result from
+`e2` with a safe approximation.
+
+This is done using `fakeBoringCalls` and has the effect of analysing
+ x `seq` x `seq` e2
+instead, i.e. with `both` the result from `e2` with the most conservative
+result about the uninteresting value.
+
+Note [Recursion and fixpointing]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For a mutually recursive let, we begin by
+ 1. analysing the body, using the same incoming arity as for the whole expression.
+ 2. Then we iterate, memoizing for each of the bound variables the last
+ analysis call, i.e. incoming arity, whether it is called once, and the CallArityRes.
+ 3. We combine the analysis result from the body and the memoized results for
+ the arguments (if already present).
+ 4. For each variable, we find out the incoming arity and whether it is called
+ once, based on the current analysis result. If this differs from the
+ memoized results, we re-analyse the rhs and update the memoized table.
+ 5. If nothing had to be re-analysed, we are done.
+ Otherwise, repeat from step 3.
+
+
+Note [Thunks in recursive groups]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We never eta-expand a thunk in a recursive group, on the grounds that if it is
+part of a recursive group, then it will be called multiple times.
+
+This is not necessarily true, e.g. it would be safe to eta-expand t2 (but not
+t1) in the following code:
+
+ let go x = t1
+ t1 = if ... then t2 else ...
+ t2 = if ... then go 1 else ...
+ in go 0
+
+Detecting this would require finding out what variables are only ever called
+from thunks. While this is certainly possible, we yet have to see this to be
+relevant in the wild.
+
+
+Note [Analysing top-level binds]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We can eta-expand top-level-binds if they are not exported, as we see all calls
+to them. The plan is as follows: Treat the top-level binds as nested lets around
+a body representing “all external calls”, which returns a pessimistic
+CallArityRes (the co-call graph is the complete graph, all arities 0).
+
+-}
+
+-- Main entry point
+
+callArityAnalProgram :: DynFlags -> CoreProgram -> CoreProgram
+callArityAnalProgram _dflags binds = binds'
+ where
+ (_, binds') = callArityTopLvl [] emptyVarSet binds
+
+-- See Note [Analysing top-level binds]
+callArityTopLvl :: [Var] -> VarSet -> [CoreBind] -> (CallArityRes, [CoreBind])
+callArityTopLvl exported _ []
+ = ( calledMultipleTimes $ (emptyUnVarGraph, mkVarEnv $ [(v, 0) | v <- exported])
+ , [] )
+callArityTopLvl exported int1 (b:bs)
+ = (ae2, b':bs')
+ where
+ int2 = bindersOf b
+ exported' = filter isExportedId int2 ++ exported
+ int' = int1 `addInterestingBinds` b
+ (ae1, bs') = callArityTopLvl exported' int' bs
+ ae1' = fakeBoringCalls int' b ae1 -- See Note [Information about boring variables]
+ (ae2, b') = callArityBind ae1' int1 b
+
+
+callArityRHS :: CoreExpr -> CoreExpr
+callArityRHS = snd . callArityAnal 0 emptyVarSet
+
+-- The main analysis function. See Note [Analysis type signature]
+callArityAnal ::
+ Arity -> -- The arity this expression is called with
+ VarSet -> -- The set of interesting variables
+ CoreExpr -> -- The expression to analyse
+ (CallArityRes, CoreExpr)
+ -- How this expression uses its interesting variables
+ -- and the expression with IdInfo updated
+
+-- The trivial base cases
+callArityAnal _ _ e@(Lit _)
+ = (emptyArityRes, e)
+callArityAnal _ _ e@(Type _)
+ = (emptyArityRes, e)
+callArityAnal _ _ e@(Coercion _)
+ = (emptyArityRes, e)
+-- The transparent cases
+callArityAnal arity int (Tick t e)
+ = second (Tick t) $ callArityAnal arity int e
+callArityAnal arity int (Cast e co)
+ = second (\e -> Cast e co) $ callArityAnal arity int e
+
+-- The interesting case: Variables, Lambdas, Lets, Applications, Cases
+callArityAnal arity int e@(Var v)
+ | v `elemVarSet` int
+ = (unitArityRes v arity, e)
+ | otherwise
+ = (emptyArityRes, e)
+
+-- Non-value lambdas are ignored
+callArityAnal arity int (Lam v e) | not (isId v)
+ = second (Lam v) $ callArityAnal arity (int `delVarSet` v) e
+
+-- We have a lambda that may be called multiple times, so its free variables
+-- can all be co-called.
+callArityAnal 0 int (Lam v e)
+ = (ae', Lam v e')
+ where
+ (ae, e') = callArityAnal 0 (int `delVarSet` v) e
+ ae' = calledMultipleTimes ae
+-- We have a lambda that we are calling. decrease arity.
+callArityAnal arity int (Lam v e)
+ = (ae, Lam v e')
+ where
+ (ae, e') = callArityAnal (arity - 1) (int `delVarSet` v) e
+
+-- Application. Increase arity for the called expresion, nothing to know about
+-- the second
+callArityAnal arity int (App e (Type t))
+ = second (\e -> App e (Type t)) $ callArityAnal arity int e
+callArityAnal arity int (App e1 e2)
+ = (final_ae, App e1' e2')
+ where
+ (ae1, e1') = callArityAnal (arity + 1) int e1
+ (ae2, e2') = callArityAnal 0 int e2
+ -- See Note [Case and App: Which side to take?]
+ final_ae = ae1 `both` ae2
+
+-- Case expression.
+callArityAnal arity int (Case scrut bndr ty alts)
+ = -- pprTrace "callArityAnal:Case"
+ -- (vcat [ppr scrut, ppr final_ae])
+ (final_ae, Case scrut' bndr ty alts')
+ where
+ (alt_aes, alts') = unzip $ map go alts
+ go (dc, bndrs, e) = let (ae, e') = callArityAnal arity int e
+ in (ae, (dc, bndrs, e'))
+ alt_ae = lubRess alt_aes
+ (scrut_ae, scrut') = callArityAnal 0 int scrut
+ -- See Note [Case and App: Which side to take?]
+ final_ae = scrut_ae `both` alt_ae
+
+-- For lets, use callArityBind
+callArityAnal arity int (Let bind e)
+ = -- pprTrace "callArityAnal:Let"
+ -- (vcat [ppr v, ppr arity, ppr n, ppr final_ae ])
+ (final_ae, Let bind' e')
+ where
+ int_body = int `addInterestingBinds` bind
+ (ae_body, e') = callArityAnal arity int_body e
+ ae_body' = fakeBoringCalls int_body bind ae_body -- See Note [Information about boring variables]
+ (final_ae, bind') = callArityBind ae_body' int bind
+
+-- Which bindings should we look at?
+-- See Note [Which variables are interesting]
+interestingBinds :: CoreBind -> [Var]
+interestingBinds = filter go . bindersOf
+ where go v = 0 < length (typeArity (idType v))
+
+addInterestingBinds :: VarSet -> CoreBind -> VarSet
+addInterestingBinds int bind
+ = int `delVarSetList` bindersOf bind -- Possible shadowing
+ `extendVarSetList` interestingBinds bind
+
+-- For every boring variable in the binder, add a safe approximation
+-- See Note [Information about boring variables]
+fakeBoringCalls :: VarSet -> CoreBind -> CallArityRes -> CallArityRes
+fakeBoringCalls int bind res = boring `both` res
+ where
+ boring = calledMultipleTimes $
+ ( emptyUnVarGraph
+ , mkVarEnv [ (v, 0) | v <- bindersOf bind, not (v `elemVarSet` int)])
+
+
+-- Used for both local and top-level binds
+-- First argument is the demand from the body
+callArityBind :: CallArityRes -> VarSet -> CoreBind -> (CallArityRes, CoreBind)
+-- Non-recursive let
+callArityBind ae_body int (NonRec v rhs)
+ | otherwise
+ = -- pprTrace "callArityBind:NonRec"
+ -- (vcat [ppr v, ppr ae_body, ppr int, ppr ae_rhs, ppr safe_arity])
+ (final_ae, NonRec v' rhs')
+ where
+ is_thunk = not (exprIsHNF rhs)
+
+ (arity, called_once) = lookupCallArityRes ae_body v
+ safe_arity | called_once = arity
+ | is_thunk = 0 -- A thunk! Do not eta-expand
+ | otherwise = arity
+ (ae_rhs, rhs') = callArityAnal safe_arity int rhs
+
+ ae_rhs'| called_once = ae_rhs
+ | safe_arity == 0 = ae_rhs -- If it is not a function, its body is evaluated only once
+ | otherwise = calledMultipleTimes ae_rhs
+
+ final_ae = callArityNonRecEnv v ae_rhs' ae_body
+ v' = v `setIdCallArity` safe_arity
+
+
+
+-- Recursive let. See Note [Recursion and fixpointing]
+callArityBind ae_body int b@(Rec binds)
+ = -- pprTrace "callArityBind:Rec"
+ -- (vcat [ppr (Rec binds'), ppr ae_body, ppr int, ppr ae_rhs]) $
+ (final_ae, Rec binds')
+ where
+ int_body = int `addInterestingBinds` b
+ (ae_rhs, binds') = fix initial_binds
+ final_ae = bindersOf b `resDelList` ae_rhs
+
+ initial_binds = [(i,Nothing,e) | (i,e) <- binds]
+
+ fix :: [(Id, Maybe (Bool, Arity, CallArityRes), CoreExpr)] -> (CallArityRes, [(Id, CoreExpr)])
+ fix ann_binds
+ | -- pprTrace "callArityBind:fix" (vcat [ppr ann_binds, ppr any_change, ppr ae]) $
+ any_change
+ = fix ann_binds'
+ | otherwise
+ = (ae, map (\(i, _, e) -> (i, e)) ann_binds')
+ where
+ aes_old = [ (i,ae) | (i, Just (_,_,ae), _) <- ann_binds ]
+ ae = callArityRecEnv aes_old ae_body
+
+ rerun (i, mbLastRun, rhs)
+ | i `elemVarSet` int_body && not (i `elemUnVarSet` domRes ae)
+ -- No call to this yet, so do nothing
+ = (False, (i, Nothing, rhs))
+
+ | Just (old_called_once, old_arity, _) <- mbLastRun
+ , called_once == old_called_once
+ , new_arity == old_arity
+ -- No change, no need to re-analyse
+ = (False, (i, mbLastRun, rhs))
+
+ | otherwise
+ -- We previously analysed this with a different arity (or not at all)
+ = let is_thunk = not (exprIsHNF rhs)
+
+ safe_arity | is_thunk = 0 -- See Note [Thunks in recursive groups]
+ | otherwise = new_arity
+
+ (ae_rhs, rhs') = callArityAnal safe_arity int_body rhs
+
+ ae_rhs' | called_once = ae_rhs
+ | safe_arity == 0 = ae_rhs -- If it is not a function, its body is evaluated only once
+ | otherwise = calledMultipleTimes ae_rhs
+
+ in (True, (i `setIdCallArity` safe_arity, Just (called_once, new_arity, ae_rhs'), rhs'))
+ where
+ (new_arity, called_once) = lookupCallArityRes ae i
+
+ (changes, ann_binds') = unzip $ map rerun ann_binds
+ any_change = or changes
+
+-- Combining the results from body and rhs, non-recursive case
+-- See Note [Analysis II: The Co-Called analysis]
+callArityNonRecEnv :: Var -> CallArityRes -> CallArityRes -> CallArityRes
+callArityNonRecEnv v ae_rhs ae_body
+ = addCrossCoCalls called_by_v called_with_v $ ae_rhs `lubRes` resDel v ae_body
+ where
+ called_by_v = domRes ae_rhs
+ called_with_v = calledWith ae_body v `delUnVarSet` v
+
+-- Combining the results from body and rhs, (mutually) recursive case
+-- See Note [Analysis II: The Co-Called analysis]
+callArityRecEnv :: [(Var, CallArityRes)] -> CallArityRes -> CallArityRes
+callArityRecEnv ae_rhss ae_body
+ = -- pprTrace "callArityRecEnv" (vcat [ppr ae_rhss, ppr ae_body, ppr ae_new])
+ ae_new
+ where
+ vars = map fst ae_rhss
+
+ ae_combined = lubRess (map snd ae_rhss) `lubRes` ae_body
+
+ cross_calls = unionUnVarGraphs $ map cross_call ae_rhss
+ cross_call (v, ae_rhs) = completeBipartiteGraph called_by_v called_with_v
+ where
+ is_thunk = idCallArity v == 0
+ -- What rhs are relevant as happening before (or after) calling v?
+ -- If v is a thunk, everything from all the _other_ variables
+ -- If v is not a thunk, everything can happen.
+ ae_before_v | is_thunk = lubRess (map snd $ filter ((/= v) . fst) ae_rhss) `lubRes` ae_body
+ | otherwise = ae_combined
+ -- What do we want to know from these?
+ -- Which calls can happen next to any recursive call.
+ called_with_v
+ = unionUnVarSets $ map (calledWith ae_before_v) vars
+ called_by_v = domRes ae_rhs
+
+ ae_new = first (cross_calls `unionUnVarGraph`) ae_combined
+
+---------------------------------------
+-- Functions related to CallArityRes --
+---------------------------------------
+
+-- Result type for the two analyses.
+-- See Note [Analysis I: The arity analyis]
+-- and Note [Analysis II: The Co-Called analysis]
+type CallArityRes = (UnVarGraph, VarEnv Arity)
+
+emptyArityRes :: CallArityRes
+emptyArityRes = (emptyUnVarGraph, emptyVarEnv)
+
+unitArityRes :: Var -> Arity -> CallArityRes
+unitArityRes v arity = (emptyUnVarGraph, unitVarEnv v arity)
+
+resDelList :: [Var] -> CallArityRes -> CallArityRes
+resDelList vs ae = foldr resDel ae vs
+
+resDel :: Var -> CallArityRes -> CallArityRes
+resDel v (g, ae) = (g `delNode` v, ae `delVarEnv` v)
+
+domRes :: CallArityRes -> UnVarSet
+domRes (_, ae) = varEnvDom ae
+
+-- In the result, find out the minimum arity and whether the variable is called
+-- at most once.
+lookupCallArityRes :: CallArityRes -> Var -> (Arity, Bool)
+lookupCallArityRes (g, ae) v
+ = case lookupVarEnv ae v of
+ Just a -> (a, not (v `elemUnVarSet` (neighbors g v)))
+ Nothing -> (0, False)
+
+calledWith :: CallArityRes -> Var -> UnVarSet
+calledWith (g, _) v = neighbors g v
+
+addCrossCoCalls :: UnVarSet -> UnVarSet -> CallArityRes -> CallArityRes
+addCrossCoCalls set1 set2 = first (completeBipartiteGraph set1 set2 `unionUnVarGraph`)
+
+-- Replaces the co-call graph by a complete graph (i.e. no information)
+calledMultipleTimes :: CallArityRes -> CallArityRes
+calledMultipleTimes res = first (const (completeGraph (domRes res))) res
+
+-- Used for application and cases
+both :: CallArityRes -> CallArityRes -> CallArityRes
+both r1 r2 = addCrossCoCalls (domRes r1) (domRes r2) $ r1 `lubRes` r2
+
+-- Used when combining results from alternative cases; take the minimum
+lubRes :: CallArityRes -> CallArityRes -> CallArityRes
+lubRes (g1, ae1) (g2, ae2) = (g1 `unionUnVarGraph` g2, ae1 `lubArityEnv` ae2)
+
+lubArityEnv :: VarEnv Arity -> VarEnv Arity -> VarEnv Arity
+lubArityEnv = plusVarEnv_C min
+
+lubRess :: [CallArityRes] -> CallArityRes
+lubRess = foldl lubRes emptyArityRes
diff --git a/compiler/simplCore/CoreMonad.lhs b/compiler/simplCore/CoreMonad.lhs
index 7f45850d388f..c06036044d04 100644
--- a/compiler/simplCore/CoreMonad.lhs
+++ b/compiler/simplCore/CoreMonad.lhs
@@ -4,15 +4,14 @@
\section[CoreMonad]{The core pipeline monad}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, UndecidableInstances #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
-- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
-- for details
-{-# LANGUAGE UndecidableInstances #-}
-
module CoreMonad (
-- * Configuration of the core-to-core passes
CoreToDo(..), runWhen, runMaybe,
@@ -305,6 +304,7 @@ data CoreToDo -- These are diff core-to-core passes,
| CoreLiberateCase
| CoreDoPrintCore
| CoreDoStaticArgs
+ | CoreDoCallArity
| CoreDoStrictness
| CoreDoWorkerWrapper
| CoreDoSpecialising
@@ -333,6 +333,7 @@ coreDumpFlag CoreDoFloatInwards = Just Opt_D_verbose_core2core
coreDumpFlag (CoreDoFloatOutwards {}) = Just Opt_D_verbose_core2core
coreDumpFlag CoreLiberateCase = Just Opt_D_verbose_core2core
coreDumpFlag CoreDoStaticArgs = Just Opt_D_verbose_core2core
+coreDumpFlag CoreDoCallArity = Just Opt_D_dump_call_arity
coreDumpFlag CoreDoStrictness = Just Opt_D_dump_stranal
coreDumpFlag CoreDoWorkerWrapper = Just Opt_D_dump_worker_wrapper
coreDumpFlag CoreDoSpecialising = Just Opt_D_dump_spec
@@ -356,6 +357,7 @@ instance Outputable CoreToDo where
ppr (CoreDoFloatOutwards f) = ptext (sLit "Float out") <> parens (ppr f)
ppr CoreLiberateCase = ptext (sLit "Liberate case")
ppr CoreDoStaticArgs = ptext (sLit "Static argument")
+ ppr CoreDoCallArity = ptext (sLit "Called arity analysis")
ppr CoreDoStrictness = ptext (sLit "Demand analysis")
ppr CoreDoWorkerWrapper = ptext (sLit "Worker Wrapper binds")
ppr CoreDoSpecialising = ptext (sLit "Specialise")
diff --git a/compiler/simplCore/FloatIn.lhs b/compiler/simplCore/FloatIn.lhs
index 8a35749c6775..2b7ee5c6d39c 100644
--- a/compiler/simplCore/FloatIn.lhs
+++ b/compiler/simplCore/FloatIn.lhs
@@ -12,7 +12,8 @@ case, so that we don't allocate things, save them on the stack, and
then discover that they aren't needed in the chosen branch.
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -25,16 +26,17 @@ module FloatIn ( floatInwards ) where
import CoreSyn
import MkCore
-import CoreUtils ( exprIsDupable, exprIsExpandable, exprOkForSideEffects )
+import CoreUtils ( exprIsDupable, exprIsExpandable, exprType, exprOkForSideEffects )
import CoreFVs ( CoreExprWithFVs, freeVars, freeVarsOf, idRuleAndUnfoldingVars )
import Id ( isOneShotBndr, idType )
import Var
-import Type ( isUnLiftedType )
+import Type ( Type, isUnLiftedType, splitFunTy, applyTy )
import VarSet
import Util
import UniqFM
import DynFlags
import Outputable
+import Data.List( mapAccumL )
\end{code}
Top-level interface function, @floatInwards@. Note that we do not
@@ -154,18 +156,42 @@ need to get at all the arguments. The next simplifier run will
pull out any silly ones.
\begin{code}
-fiExpr dflags to_drop (_,AnnApp fun arg@(arg_fvs, ann_arg))
- | noFloatIntoRhs ann_arg = wrapFloats drop_here $ wrapFloats arg_drop $
- App (fiExpr dflags fun_drop fun) (fiExpr dflags [] arg)
- -- It's inconvenient to test for an unlifted arg here,
- -- and it really doesn't matter if we float into one
- | otherwise = wrapFloats drop_here $
- App (fiExpr dflags fun_drop fun) (fiExpr dflags arg_drop arg)
+fiExpr dflags to_drop ann_expr@(_,AnnApp {})
+ = wrapFloats drop_here $ wrapFloats extra_drop $
+ mkApps (fiExpr dflags fun_drop ann_fun)
+ (zipWith (fiExpr dflags) arg_drops ann_args)
where
- [drop_here, fun_drop, arg_drop]
- = sepBindsByDropPoint dflags False [freeVarsOf fun, arg_fvs] to_drop
+ (ann_fun@(fun_fvs, _), ann_args) = collectAnnArgs ann_expr
+ fun_ty = exprType (deAnnotate ann_fun)
+ ((_,extra_fvs), arg_fvs) = mapAccumL mk_arg_fvs (fun_ty, emptyVarSet) ann_args
+
+ -- All this faffing about is so that we can get hold of
+ -- the types of the arguments, to pass to noFloatIntoRhs
+ mk_arg_fvs :: (Type, FreeVarSet) -> CoreExprWithFVs -> ((Type, FreeVarSet), FreeVarSet)
+ mk_arg_fvs (fun_ty, extra_fvs) (_, AnnType ty)
+ = ((applyTy fun_ty ty, extra_fvs), emptyVarSet)
+
+ mk_arg_fvs (fun_ty, extra_fvs) (arg_fvs, ann_arg)
+ | noFloatIntoRhs ann_arg arg_ty
+ = ((res_ty, extra_fvs `unionVarSet` arg_fvs), emptyVarSet)
+ | otherwise
+ = ((res_ty, extra_fvs), arg_fvs)
+ where
+ (arg_ty, res_ty) = splitFunTy fun_ty
+
+ drop_here : extra_drop : fun_drop : arg_drops
+ = sepBindsByDropPoint dflags False (extra_fvs : fun_fvs : arg_fvs) to_drop
\end{code}
+Note [Do not destroy the let/app invariant]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Watch out for
+ f (x +# y)
+We don't want to float bindings into here
+ f (case ... of { x -> x +# y })
+because that might destroy the let/app invariant, which requires
+unlifted function arguments to be ok-for-speculation.
+
Note [Floating in past a lambda group]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* We must be careful about floating inside inside a value lambda.
@@ -224,12 +250,12 @@ We don't float lets inwards past an SCC.
\begin{code}
fiExpr dflags to_drop (_, AnnTick tickish expr)
- | tickishScoped tickish
- = -- Wimp out for now - we could push values in
- wrapFloats to_drop (Tick tickish (fiExpr dflags [] expr))
-
- | otherwise
+ | tickish `tickishScopesLike` SoftScope
= Tick tickish (fiExpr dflags to_drop expr)
+
+ | otherwise -- Wimp out for now - we could push values in
+ = wrapFloats to_drop (Tick tickish (fiExpr dflags [] expr))
+
\end{code}
For @Lets@, the possible ``drop points'' for the \tr{to_drop}
@@ -274,8 +300,8 @@ arrange to dump bindings that bind extra_fvs before the entire let.
Note [extra_fvs (2): free variables of rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Consider
- let x{rule mentioning y} = rhs in body
+Consider
+ let x{rule mentioning y} = rhs in body
Here y is not free in rhs or body; but we still want to dump bindings
that bind y outside the let. So we augment extra_fvs with the
idRuleAndUnfoldingVars of x. No need for type variables, hence not using
@@ -287,11 +313,11 @@ fiExpr dflags to_drop (_,AnnLet (AnnNonRec id rhs@(rhs_fvs, ann_rhs)) body)
= fiExpr dflags new_to_drop body
where
body_fvs = freeVarsOf body `delVarSet` id
+ rhs_ty = idType id
rule_fvs = idRuleAndUnfoldingVars id -- See Note [extra_fvs (2): free variables of rules]
- extra_fvs | noFloatIntoRhs ann_rhs
- || isUnLiftedType (idType id) = rule_fvs `unionVarSet` rhs_fvs
- | otherwise = rule_fvs
+ extra_fvs | noFloatIntoRhs ann_rhs rhs_ty = rule_fvs `unionVarSet` rhs_fvs
+ | otherwise = rule_fvs
-- See Note [extra_fvs (1): avoid floating into RHS]
-- No point in floating in only to float straight out again
-- Ditto ok-for-speculation unlifted RHSs
@@ -321,7 +347,7 @@ fiExpr dflags to_drop (_,AnnLet (AnnRec bindings) body)
rule_fvs = foldr (unionVarSet . idRuleAndUnfoldingVars) emptyVarSet ids
extra_fvs = rule_fvs `unionVarSet`
unionVarSets [ fvs | (fvs, rhs) <- rhss
- , noFloatIntoRhs rhs ]
+ , noFloatIntoExpr rhs ]
(shared_binds:extra_binds:body_binds:rhss_binds)
= sepBindsByDropPoint dflags False (extra_fvs:body_fvs:rhss_fvs) to_drop
@@ -363,6 +389,7 @@ floating in cases with a single alternative that may bind values.
fiExpr dflags to_drop (_, AnnCase scrut case_bndr _ [(con,alt_bndrs,rhs)])
| isUnLiftedType (idType case_bndr)
, exprOkForSideEffects (deAnnotate scrut)
+ -- See PrimOp, Note [PrimOp can_fail and has_side_effects]
= wrapFloats shared_binds $
fiExpr dflags (case_float : rhs_binds) rhs
where
@@ -402,8 +429,15 @@ okToFloatInside bndrs = all ok bndrs
ok b = not (isId b) || isOneShotBndr b
-- Push the floats inside there are no non-one-shot value binders
-noFloatIntoRhs :: AnnExpr' Var (UniqFM Var) -> Bool
-noFloatIntoRhs (AnnLam bndr e)
+noFloatIntoRhs :: AnnExpr' Var (UniqFM Var) -> Type -> Bool
+-- ^ True if it's a bad idea to float bindings into this RHS
+-- Precondition: rhs :: rhs_ty
+noFloatIntoRhs rhs rhs_ty
+ = isUnLiftedType rhs_ty -- See Note [Do not destroy the let/app invariant]
+ || noFloatIntoExpr rhs
+
+noFloatIntoExpr :: AnnExpr' Var (UniqFM Var) -> Bool
+noFloatIntoExpr (AnnLam bndr e)
= not (okToFloatInside (bndr:bndrs))
-- NB: Must line up with fiExpr (AnnLam...); see Trac #7088
where
@@ -417,7 +451,7 @@ noFloatIntoRhs (AnnLam bndr e)
-- boxing constructor into it, else we box it every time which is very bad
-- news indeed.
-noFloatIntoRhs rhs = exprIsExpandable (deAnnotate' rhs)
+noFloatIntoExpr rhs = exprIsExpandable (deAnnotate' rhs)
-- We'd just float right back out again...
-- Should match the test in SimplEnv.doFloatFromRhs
\end{code}
diff --git a/compiler/simplCore/FloatOut.lhs b/compiler/simplCore/FloatOut.lhs
index fbe8a3eb8a16..f66dbe306e3b 100644
--- a/compiler/simplCore/FloatOut.lhs
+++ b/compiler/simplCore/FloatOut.lhs
@@ -6,8 +6,9 @@
``Long-distance'' floating of bindings towards the top level.
\begin{code}
+{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -285,18 +286,20 @@ floatExpr lam@(Lam (TB _ lam_spec) _)
(add_to_stats fs floats, floats, mkLams bndrs body') }
floatExpr (Tick tickish expr)
- | tickishScoped tickish
+ | tickish `tickishScopesLike` SoftScope -- not scoped, can just float
= case (floatExpr expr) of { (fs, floating_defns, expr') ->
- let
- -- Annotate bindings floated outwards past an scc expression
- -- with the cc. We mark that cc as "duplicated", though.
+ (fs, floating_defns, Tick tickish expr') }
+
+ | not (tickishCounts tickish) || tickishCanSplit tickish
+ = case (floatExpr expr) of { (fs, floating_defns, expr') ->
+ let -- Annotate bindings floated outwards past an scc expression
+ -- with the cc. We mark that cc as "duplicated", though.
annotated_defns = wrapTick (mkNoCount tickish) floating_defns
in
(fs, annotated_defns, Tick tickish expr') }
- | otherwise -- not scoped, can just float
- = case (floatExpr expr) of { (fs, floating_defns, expr') ->
- (fs, floating_defns, Tick tickish expr') }
+ | otherwise
+ = pprPanic "floatExpr tick" (ppr tickish)
floatExpr (Cast expr co)
= case (floatExpr expr) of { (fs, floating_defns, expr') ->
@@ -457,11 +460,6 @@ data FloatBinds = FB !(Bag FloatLet) -- Destined for top level
!MajorEnv -- Levels other than top
-- See Note [Representation of FloatBinds]
-instance Outputable FloatBind where
- ppr (FloatLet b) = ptext (sLit "LET") <+> ppr b
- ppr (FloatCase e b c bs) = hang (ptext (sLit "CASE") <+> ppr e <+> ptext (sLit "of") <+> ppr b)
- 2 (ppr c <+> ppr bs)
-
instance Outputable FloatBinds where
ppr (FB fbs defs)
= ptext (sLit "FB") <+> (braces $ vcat
diff --git a/compiler/simplCore/LiberateCase.lhs b/compiler/simplCore/LiberateCase.lhs
index a89396b7827b..2593ab159cdb 100644
--- a/compiler/simplCore/LiberateCase.lhs
+++ b/compiler/simplCore/LiberateCase.lhs
@@ -4,7 +4,8 @@
\section[LiberateCase]{Unroll recursion to allow evals to be lifted from a loop}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
diff --git a/compiler/simplCore/OccurAnal.lhs b/compiler/simplCore/OccurAnal.lhs
index 2487787c8dba..ddda37f9f3e0 100644
--- a/compiler/simplCore/OccurAnal.lhs
+++ b/compiler/simplCore/OccurAnal.lhs
@@ -12,7 +12,8 @@ The occurrence analyser re-typechecks a core expression, returning a new
core expression with (hopefully) improved usage information.
\begin{code}
-{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE CPP, BangPatterns #-}
+
module OccurAnal (
occurAnalysePgm, occurAnalyseExpr, occurAnalyseExpr_NoBinderSwap
) where
@@ -21,7 +22,8 @@ module OccurAnal (
import CoreSyn
import CoreFVs
-import CoreUtils ( exprIsTrivial, isDefaultAlt, isExpandableApp )
+import CoreUtils ( exprIsTrivial, isDefaultAlt, isExpandableApp,
+ stripTicksTop )
import Id
import Name( localiseName )
import BasicTypes
@@ -40,6 +42,7 @@ import Util
import Outputable
import FastString
import Data.List
+import Control.Arrow ( second )
\end{code}
@@ -1181,18 +1184,19 @@ we can sort them into the right place when doing dependency analysis.
\begin{code}
occAnal env (Tick tickish body)
+ | tickish `tickishScopesLike` SoftScope
+ = (usage, Tick tickish body')
+
| Breakpoint _ ids <- tickish
- = (mapVarEnv markInsideSCC usage
- +++ mkVarEnv (zip ids (repeat NoOccInfo)), Tick tickish body')
+ = (usage_lam +++ mkVarEnv (zip ids (repeat NoOccInfo)), Tick tickish body')
-- never substitute for any of the Ids in a Breakpoint
- | tickishScoped tickish
- = (mapVarEnv markInsideSCC usage, Tick tickish body')
-
| otherwise
- = (usage, Tick tickish body')
+ = (usage_lam, Tick tickish body')
where
!(usage,body') = occAnal env body
+ -- for a non-soft tick scope, we can inline lambdas only
+ usage_lam = mapVarEnv markInsideLam usage
occAnal env (Cast expr co)
= case occAnal env expr of { (usage, expr') ->
@@ -1204,6 +1208,7 @@ occAnal env (Cast expr co)
-- then mark y as 'Many' so that we don't
-- immediately inline y again.
}
+
\end{code}
\begin{code}
@@ -1275,6 +1280,13 @@ occAnal env (Case scrut bndr ty alts)
= (mkOneOcc env v True, Var v) -- The 'True' says that the variable occurs
-- in an interesting context; the case has
-- at least one non-default alternative
+ occ_anal_scrut (Tick t e) alts
+ | t `tickishScopesLike` SoftScope
+ -- No reason to not look through all ticks here, but only
+ -- for soft-scoped ticks we can do so without having to
+ -- update returned occurrence info (see occAnal)
+ = second (Tick t) $ occ_anal_scrut e alts
+
occ_anal_scrut scrut _alts
= occAnal (vanillaCtxt env) scrut -- No need for rhsCtxt
@@ -1729,7 +1741,7 @@ mkAltEnv :: OccEnv -> CoreExpr -> Id -> (OccEnv, Maybe (Id, CoreExpr))
-- c) returns a proxy mapping, binding the scrutinee
-- to the case binder, if possible
mkAltEnv env@(OccEnv { occ_gbl_scrut = pe }) scrut case_bndr
- = case scrut of
+ = case snd (stripTicksTop (const True) scrut) of
Var v -> add_scrut v case_bndr'
Cast (Var v) co -> add_scrut v (Cast case_bndr' (mkSymCo co))
-- See Note [Case of cast]
@@ -1843,13 +1855,10 @@ mkOneOcc env id int_cxt
| otherwise
= emptyDetails
-markMany, markInsideLam, markInsideSCC :: OccInfo -> OccInfo
+markMany, markInsideLam :: OccInfo -> OccInfo
markMany _ = NoOccInfo
-markInsideSCC occ = markInsideLam occ
- -- inside an SCC, we can inline lambdas only.
-
markInsideLam (OneOcc _ one_br int_cxt) = OneOcc True one_br int_cxt
markInsideLam occ = occ
diff --git a/compiler/simplCore/SAT.lhs b/compiler/simplCore/SAT.lhs
index bc1ce42cd628..92ebdfe389a5 100644
--- a/compiler/simplCore/SAT.lhs
+++ b/compiler/simplCore/SAT.lhs
@@ -49,7 +49,8 @@ essential to make this work well!
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
diff --git a/compiler/simplCore/SetLevels.lhs b/compiler/simplCore/SetLevels.lhs
index 7bcc53f6dec3..ff55e9964877 100644
--- a/compiler/simplCore/SetLevels.lhs
+++ b/compiler/simplCore/SetLevels.lhs
@@ -42,7 +42,8 @@
the scrutinee of the case, and we can inline it.
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -63,28 +64,27 @@ module SetLevels (
import CoreSyn
import CoreMonad ( FloatOutSwitches(..) )
-import CoreUtils ( exprType, exprOkForSpeculation )
+import CoreUtils ( exprType, exprOkForSpeculation, exprIsBottom )
import CoreArity ( exprBotStrictness_maybe )
import CoreFVs -- all of it
import Coercion ( isCoVar )
-import CoreSubst ( Subst, emptySubst, extendInScope, substBndr, substRecBndrs,
- extendIdSubst, extendSubstWithVar, cloneBndr,
- cloneRecIdBndrs, substTy, substCo )
-import MkCore ( sortQuantVars )
+import CoreSubst ( Subst, emptySubst, substBndrs, substRecBndrs,
+ extendIdSubst, extendSubstWithVar, cloneBndrs,
+ cloneRecIdBndrs, substTy, substCo, substVarSet )
+import MkCore ( sortQuantVars )
import Id
import IdInfo
import Var
import VarSet
import VarEnv
import Literal ( litIsTrivial )
-import Demand ( StrictSig, increaseStrictSigArity )
+import Demand ( StrictSig )
import Name ( getOccName, mkSystemVarName )
import OccName ( occNameString )
import Type ( isUnLiftedType, Type, mkPiTypes )
-import BasicTypes ( Arity )
+import BasicTypes ( Arity, RecFlag(..) )
import UniqSupply
import Util
-import MonadUtils
import Outputable
import FastString
\end{code}
@@ -235,16 +235,14 @@ setLevels float_lams binds us
lvlTopBind :: LevelEnv -> Bind Id -> LvlM (LevelledBind, LevelEnv)
lvlTopBind env (NonRec bndr rhs)
- = do rhs' <- lvlExpr tOP_LEVEL env (freeVars rhs)
- let bndr' = TB bndr (StayPut tOP_LEVEL)
- env' = extendLvlEnv env [bndr']
- return (NonRec bndr' rhs', env')
+ = do { rhs' <- lvlExpr env (freeVars rhs)
+ ; let (env', [bndr']) = substAndLvlBndrs NonRecursive env tOP_LEVEL [bndr]
+ ; return (NonRec bndr' rhs', env') }
lvlTopBind env (Rec pairs)
= do let (bndrs,rhss) = unzip pairs
- bndrs' = [TB b (StayPut tOP_LEVEL) | b <- bndrs]
- env' = extendLvlEnv env bndrs'
- rhss' <- mapM (lvlExpr tOP_LEVEL env' . freeVars) rhss
+ (env', bndrs') = substAndLvlBndrs Recursive env tOP_LEVEL bndrs
+ rhss' <- mapM (lvlExpr env' . freeVars) rhss
return (Rec (bndrs' `zip` rhss'), env')
\end{code}
@@ -255,9 +253,8 @@ lvlTopBind env (Rec pairs)
%************************************************************************
\begin{code}
-lvlExpr :: Level -- ctxt_lvl: Level of enclosing expression
- -> LevelEnv -- Level of in-scope names/tyvars
- -> CoreExprWithFVs -- input expression
+lvlExpr :: LevelEnv -- Context
+ -> CoreExprWithFVs -- Input expression
-> LvlM LevelledExpr -- Result expression
\end{code}
@@ -277,12 +274,20 @@ don't want @lvlExpr@ to turn the scrutinee of the @case@ into an MFE
If there were another lambda in @r@'s rhs, it would get level-2 as well.
\begin{code}
-lvlExpr _ env (_, AnnType ty) = return (Type (substTy (le_subst env) ty))
-lvlExpr _ env (_, AnnCoercion co) = return (Coercion (substCo (le_subst env) co))
-lvlExpr _ env (_, AnnVar v) = return (lookupVar env v)
-lvlExpr _ _ (_, AnnLit lit) = return (Lit lit)
+lvlExpr env (_, AnnType ty) = return (Type (substTy (le_subst env) ty))
+lvlExpr env (_, AnnCoercion co) = return (Coercion (substCo (le_subst env) co))
+lvlExpr env (_, AnnVar v) = return (lookupVar env v)
+lvlExpr _ (_, AnnLit lit) = return (Lit lit)
+
+lvlExpr env (_, AnnCast expr (_, co)) = do
+ expr' <- lvlExpr env expr
+ return (Cast expr' (substCo (le_subst env) co))
+
+lvlExpr env (_, AnnTick tickish expr) = do
+ expr' <- lvlExpr env expr
+ return (Tick tickish expr')
-lvlExpr ctxt_lvl env expr@(_, AnnApp _ _) = do
+lvlExpr env expr@(_, AnnApp _ _) = do
let
(fun, args) = collectAnnArgs expr
--
@@ -296,8 +301,8 @@ lvlExpr ctxt_lvl env expr@(_, AnnApp _ _) = do
arity > 0 && arity < n_val_args ->
do
let (lapp, rargs) = left (n_val_args - arity) expr []
- rargs' <- mapM (lvlMFE False ctxt_lvl env) rargs
- lapp' <- lvlMFE False ctxt_lvl env lapp
+ rargs' <- mapM (lvlMFE False env) rargs
+ lapp' <- lvlMFE False env lapp
return (foldl App lapp' rargs')
where
n_val_args = count (isValArg . deAnnotate) args
@@ -315,32 +320,24 @@ lvlExpr ctxt_lvl env expr@(_, AnnApp _ _) = do
-- No PAPs that we can float: just carry on with the
-- arguments and the function.
_otherwise -> do
- args' <- mapM (lvlMFE False ctxt_lvl env) args
- fun' <- lvlExpr ctxt_lvl env fun
+ args' <- mapM (lvlMFE False env) args
+ fun' <- lvlExpr env fun
return (foldl App fun' args')
-lvlExpr ctxt_lvl env (_, AnnTick tickish expr) = do
- expr' <- lvlExpr ctxt_lvl env expr
- return (Tick tickish expr')
-
-lvlExpr ctxt_lvl env (_, AnnCast expr (_, co)) = do
- expr' <- lvlExpr ctxt_lvl env expr
- return (Cast expr' (substCo (le_subst env) co))
-
-- We don't split adjacent lambdas. That is, given
-- \x y -> (x+1,y)
--- we don't float to give
+-- we don't float to give
-- \x -> let v = x+y in \y -> (v,y)
-- Why not? Because partial applications are fairly rare, and splitting
-- lambdas makes them more expensive.
-lvlExpr ctxt_lvl env expr@(_, AnnLam {}) = do
- new_body <- lvlMFE True new_lvl new_env body
- return (mkLams new_bndrs new_body)
- where
+lvlExpr env expr@(_, AnnLam {})
+ = do { new_body <- lvlMFE True new_env body
+ ; return (mkLams new_bndrs new_body) }
+ where
(bndrs, body) = collectAnnBndrs expr
- (new_lvl, new_bndrs) = lvlLamBndrs ctxt_lvl bndrs
- new_env = extendLvlEnv env new_bndrs
+ (env1, bndrs1) = substBndrsSL NonRecursive env bndrs
+ (new_env, new_bndrs) = lvlLamBndrs env1 (le_ctxt_lvl env) bndrs1
-- At one time we called a special verion of collectBinders,
-- which ignored coercions, because we don't want to split
-- a lambda like this (\x -> coerce t (\s -> ...))
@@ -348,55 +345,52 @@ lvlExpr ctxt_lvl env expr@(_, AnnLam {}) = do
-- but not nearly so much now non-recursive newtypes are transparent.
-- [See SetLevels rev 1.50 for a version with this approach.]
-lvlExpr ctxt_lvl env (_, AnnLet bind body) = do
- (bind', new_lvl, new_env) <- lvlBind ctxt_lvl env bind
- body' <- lvlExpr new_lvl new_env body
- return (Let bind' body')
+lvlExpr env (_, AnnLet bind body)
+ = do { (bind', new_env) <- lvlBind env bind
+ ; body' <- lvlExpr new_env body
+ ; return (Let bind' body') }
-lvlExpr ctxt_lvl env (_, AnnCase scrut@(scrut_fvs,_) case_bndr ty alts)
- = do { scrut' <- lvlMFE True ctxt_lvl env scrut
- ; lvlCase ctxt_lvl env scrut_fvs scrut' case_bndr ty alts }
+lvlExpr env (_, AnnCase scrut@(scrut_fvs,_) case_bndr ty alts)
+ = do { scrut' <- lvlMFE True env scrut
+ ; lvlCase env scrut_fvs scrut' case_bndr ty alts }
-------------------------------------------
-lvlCase :: Level -- ctxt_lvl: Level of enclosing expression
- -> LevelEnv -- Level of in-scope names/tyvars
+lvlCase :: LevelEnv -- Level of in-scope names/tyvars
-> VarSet -- Free vars of input scrutinee
-> LevelledExpr -- Processed scrutinee
-> Id -> Type -- Case binder and result type
-> [AnnAlt Id VarSet] -- Input alternatives
-> LvlM LevelledExpr -- Result expression
-lvlCase ctxt_lvl env scrut_fvs scrut' case_bndr ty alts
- | [(con@(DataAlt {}), bs, rhs)] <- alts
+lvlCase env scrut_fvs scrut' case_bndr ty alts
+ | [(con@(DataAlt {}), bs, body)] <- alts
, exprOkForSpeculation scrut' -- See Note [Check the output scrutinee for okForSpec]
, not (isTopLvl dest_lvl) -- Can't have top-level cases
= -- See Note [Floating cases]
-- Always float the case if possible
-- Unlike lets we don't insist that it escapes a value lambda
- do { (rhs_env, (case_bndr':bs')) <- cloneVars env (case_bndr:bs) dest_lvl
+ do { (rhs_env, (case_bndr':bs')) <- cloneVars NonRecursive env dest_lvl (case_bndr:bs)
-- We don't need to use extendCaseBndrLvlEnv here
-- because we are floating the case outwards so
-- no need to do the binder-swap thing
- ; rhs' <- lvlMFE True ctxt_lvl rhs_env rhs
- ; let alt' = (con, [TB b (StayPut dest_lvl) | b <- bs'], rhs')
+ ; body' <- lvlMFE True rhs_env body
+ ; let alt' = (con, [TB b (StayPut dest_lvl) | b <- bs'], body')
; return (Case scrut' (TB case_bndr' (FloatMe dest_lvl)) ty [alt']) }
| otherwise -- Stays put
- = do { let case_bndr' = TB case_bndr bndr_spec
- alts_env = extendCaseBndrLvlEnv env scrut' case_bndr'
+ = do { let (alts_env1, [case_bndr']) = substAndLvlBndrs NonRecursive env incd_lvl [case_bndr]
+ alts_env = extendCaseBndrEnv alts_env1 case_bndr scrut'
; alts' <- mapM (lvl_alt alts_env) alts
; return (Case scrut' case_bndr' ty alts') }
where
- incd_lvl = incMinorLvl ctxt_lvl
- bndr_spec = StayPut incd_lvl
+ incd_lvl = incMinorLvl (le_ctxt_lvl env)
dest_lvl = maxFvLevel (const True) env scrut_fvs
-- Don't abstact over type variables, hence const True
lvl_alt alts_env (con, bs, rhs)
- = do { rhs' <- lvlMFE True incd_lvl new_env rhs
+ = do { rhs' <- lvlMFE True new_env rhs
; return (con, bs', rhs') }
where
- bs' = [ TB b bndr_spec | b <- bs ]
- new_env = extendLvlEnv alts_env bs'
+ (new_env, bs') = substAndLvlBndrs NonRecursive alts_env incd_lvl bs
\end{code}
Note [Floating cases]
@@ -445,56 +439,55 @@ That's why we apply exprOkForSpeculation to scrut' and not to scrut.
\begin{code}
lvlMFE :: Bool -- True <=> strict context [body of case or let]
- -> Level -- Level of innermost enclosing lambda/tylam
-> LevelEnv -- Level of in-scope names/tyvars
-> CoreExprWithFVs -- input expression
-> LvlM LevelledExpr -- Result expression
-- lvlMFE is just like lvlExpr, except that it might let-bind
-- the expression, so that it can itself be floated.
-lvlMFE _ _ env (_, AnnType ty)
+lvlMFE _ env (_, AnnType ty)
= return (Type (substTy (le_subst env) ty))
-- No point in floating out an expression wrapped in a coercion or note
-- If we do we'll transform lvl = e |> co
-- to lvl' = e; lvl = lvl' |> co
-- and then inline lvl. Better just to float out the payload.
-lvlMFE strict_ctxt ctxt_lvl env (_, AnnTick t e)
- = do { e' <- lvlMFE strict_ctxt ctxt_lvl env e
+lvlMFE strict_ctxt env (_, AnnTick t e)
+ = do { e' <- lvlMFE strict_ctxt env e
; return (Tick t e') }
-lvlMFE strict_ctxt ctxt_lvl env (_, AnnCast e (_, co))
- = do { e' <- lvlMFE strict_ctxt ctxt_lvl env e
+lvlMFE strict_ctxt env (_, AnnCast e (_, co))
+ = do { e' <- lvlMFE strict_ctxt env e
; return (Cast e' (substCo (le_subst env) co)) }
-- Note [Case MFEs]
-lvlMFE True ctxt_lvl env e@(_, AnnCase {})
- = lvlExpr ctxt_lvl env e -- Don't share cases
-
-lvlMFE strict_ctxt ctxt_lvl env ann_expr@(fvs, _)
- | isUnLiftedType ty -- Can't let-bind it; see Note [Unlifted MFEs]
- -- This includes coercions, which we don't
- -- want to float anyway
+lvlMFE True env e@(_, AnnCase {})
+ = lvlExpr env e -- Don't share cases
+
+lvlMFE strict_ctxt env ann_expr@(fvs, _)
+ | isUnLiftedType (exprType expr)
+ -- Can't let-bind it; see Note [Unlifted MFEs]
+ -- This includes coercions, which we don't want to float anyway
+ -- NB: no need to substitute cos isUnLiftedType doesn't change
|| notWorthFloating ann_expr abs_vars
|| not float_me
= -- Don't float it out
- lvlExpr ctxt_lvl env ann_expr
+ lvlExpr env ann_expr
| otherwise -- Float it out!
- = do expr' <- lvlFloatRhs abs_vars dest_lvl env ann_expr
- var <- newLvlVar abs_vars ty mb_bot
- return (Let (NonRec (TB var (FloatMe dest_lvl)) expr')
- (mkVarApps (Var var) abs_vars))
+ = do { expr' <- lvlFloatRhs abs_vars dest_lvl env ann_expr
+ ; var <- newLvlVar expr' is_bot
+ ; return (Let (NonRec (TB var (FloatMe dest_lvl)) expr')
+ (mkVarApps (Var var) abs_vars)) }
where
expr = deAnnotate ann_expr
- ty = exprType expr
- mb_bot = exprBotStrictness_maybe expr
- dest_lvl = destLevel env fvs (isFunction ann_expr) mb_bot
+ is_bot = exprIsBottom expr -- Note [Bottoming floats]
+ dest_lvl = destLevel env fvs (isFunction ann_expr) is_bot
abs_vars = abstractVars dest_lvl env fvs
-- A decision to float entails let-binding this thing, and we only do
-- that if we'll escape a value lambda, or will go to the top level.
- float_me = dest_lvl `ltMajLvl` ctxt_lvl -- Escapes a value lambda
+ float_me = dest_lvl `ltMajLvl` (le_ctxt_lvl env) -- Escapes a value lambda
-- OLD CODE: not (exprIsCheap expr) || isTopLvl dest_lvl
-- see Note [Escaping a value lambda]
@@ -542,9 +535,15 @@ Then we'd like to abstact over 'x' can float the whole arg of g:
See Maessen's paper 1999 "Bottom extraction: factoring error handling out
of functional programs" (unpublished I think).
-When we do this, we set the strictness and arity of the new bottoming
-Id, so that it's properly exposed as such in the interface file, even if
-this is all happening after strictness analysis.
+When we do this, we set the strictness and arity of the new bottoming
+Id, *immediately*, for two reasons:
+
+ * To prevent the abstracted thing being immediately inlined back in again
+ via preInlineUnconditionally. The latter has a test for bottoming Ids
+ to stop inlining them, so we'd better make sure it *is* a bottoming Id!
+
+ * So that it's properly exposed as such in the interface file, even if
+ this is all happening after strictness analysis.
Note [Bottoming floats: eta expansion] c.f Note [Bottoming floats]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -563,9 +562,11 @@ Doesn't change any other allocation at all.
\begin{code}
annotateBotStr :: Id -> Maybe (Arity, StrictSig) -> Id
+-- See Note [Bottoming floats] for why we want to add
+-- bottoming information right now
annotateBotStr id Nothing = id
annotateBotStr id (Just (arity, sig)) = id `setIdArity` arity
- `setIdStrictness` sig
+ `setIdStrictness` sig
notWorthFloating :: CoreExprWithFVs -> [Var] -> Bool
-- Returns True if the expression would be replaced by
@@ -585,6 +586,8 @@ notWorthFloating e abs_vars
go (_, AnnVar {}) n = n >= 0
go (_, AnnLit lit) n = ASSERT( n==0 )
litIsTrivial lit -- Note [Floating literals]
+ go (_, AnnTick t e) n | not (tickishIsCode t)
+ = go e n
go (_, AnnCast e _) n = go e n
go (_, AnnApp e arg) n
| (_, AnnType {}) <- arg = go e n
@@ -664,102 +667,95 @@ OLD comment was:
The binding stuff works for top level too.
\begin{code}
-lvlBind :: Level -- Context level; might be Top even for bindings
- -- nested in the RHS of a top level binding
- -> LevelEnv
+lvlBind :: LevelEnv
-> CoreBindWithFVs
- -> LvlM (LevelledBind, Level, LevelEnv)
+ -> LvlM (LevelledBind, LevelEnv)
-lvlBind ctxt_lvl env (AnnNonRec bndr rhs@(rhs_fvs,_))
+lvlBind env (AnnNonRec bndr rhs@(rhs_fvs,_))
| isTyVar bndr -- Don't do anything for TyVar binders
-- (simplifier gets rid of them pronto)
|| isCoVar bndr -- Difficult to fix up CoVar occurrences (see extendPolyLvlEnv)
-- so we will ignore this case for now
- || not (profitableFloat ctxt_lvl dest_lvl)
+ || not (profitableFloat env dest_lvl)
|| (isTopLvl dest_lvl && isUnLiftedType (idType bndr))
-- We can't float an unlifted binding to top level, so we don't
-- float it at all. It's a bit brutal, but unlifted bindings
-- aren't expensive either
= -- No float
- do rhs' <- lvlExpr ctxt_lvl env rhs
- let (env', bndr') = substLetBndrNonRec env bndr bind_lvl
- bind_lvl = incMinorLvl ctxt_lvl
- tagged_bndr = TB bndr' (StayPut bind_lvl)
- return (NonRec tagged_bndr rhs', bind_lvl, env')
+ do { rhs' <- lvlExpr env rhs
+ ; let bind_lvl = incMinorLvl (le_ctxt_lvl env)
+ (env', [bndr']) = substAndLvlBndrs NonRecursive env bind_lvl [bndr]
+ ; return (NonRec bndr' rhs', env') }
-- Otherwise we are going to float
| null abs_vars
- = do -- No type abstraction; clone existing binder
- rhs' <- lvlExpr dest_lvl env rhs
- (env', bndr') <- cloneVar env bndr dest_lvl
- return (NonRec (TB bndr' (FloatMe dest_lvl)) rhs', ctxt_lvl, env')
+ = do { -- No type abstraction; clone existing binder
+ rhs' <- lvlExpr (setCtxtLvl env dest_lvl) rhs
+ ; (env', [bndr']) <- cloneVars NonRecursive env dest_lvl [bndr]
+ ; return (NonRec (TB bndr' (FloatMe dest_lvl)) rhs', env') }
| otherwise
- = do -- Yes, type abstraction; create a new binder, extend substitution, etc
- rhs' <- lvlFloatRhs abs_vars dest_lvl env rhs
- (env', [bndr']) <- newPolyBndrs dest_lvl env abs_vars [bndr_w_str]
- return (NonRec (TB bndr' (FloatMe dest_lvl)) rhs', ctxt_lvl, env')
+ = do { -- Yes, type abstraction; create a new binder, extend substitution, etc
+ rhs' <- lvlFloatRhs abs_vars dest_lvl env rhs
+ ; (env', [bndr']) <- newPolyBndrs dest_lvl env abs_vars [bndr]
+ ; return (NonRec (TB bndr' (FloatMe dest_lvl)) rhs', env') }
where
bind_fvs = rhs_fvs `unionVarSet` idFreeVars bndr
abs_vars = abstractVars dest_lvl env bind_fvs
- dest_lvl = destLevel env bind_fvs (isFunction rhs) mb_bot
- mb_bot = exprBotStrictness_maybe (deAnnotate rhs)
- bndr_w_str = annotateBotStr bndr mb_bot
-
-lvlBind ctxt_lvl env (AnnRec pairs)
- | not (profitableFloat ctxt_lvl dest_lvl)
- = do let bind_lvl = incMinorLvl ctxt_lvl
- (env', bndrs') = substLetBndrsRec env bndrs bind_lvl
- tagged_bndrs = [ TB bndr' (StayPut bind_lvl)
- | bndr' <- bndrs' ]
- rhss' <- mapM (lvlExpr bind_lvl env') rhss
- return (Rec (tagged_bndrs `zip` rhss'), bind_lvl, env')
-
- | null abs_vars
- = do (new_env, new_bndrs) <- cloneRecVars env bndrs dest_lvl
- new_rhss <- mapM (lvlExpr ctxt_lvl new_env) rhss
- return ( Rec ([TB b (FloatMe dest_lvl) | b <- new_bndrs] `zip` new_rhss)
- , ctxt_lvl, new_env)
+ dest_lvl = destLevel env bind_fvs (isFunction rhs) is_bot
+ is_bot = exprIsBottom (deAnnotate rhs)
+
+lvlBind env (AnnRec pairs)
+ | not (profitableFloat env dest_lvl)
+ = do { let bind_lvl = incMinorLvl (le_ctxt_lvl env)
+ (env', bndrs') = substAndLvlBndrs Recursive env bind_lvl bndrs
+ ; rhss' <- mapM (lvlExpr env') rhss
+ ; return (Rec (bndrs' `zip` rhss'), env') }
+
+ | null abs_vars
+ = do { (new_env, new_bndrs) <- cloneVars Recursive env dest_lvl bndrs
+ ; new_rhss <- mapM (lvlExpr (setCtxtLvl new_env dest_lvl)) rhss
+ ; return ( Rec ([TB b (FloatMe dest_lvl) | b <- new_bndrs] `zip` new_rhss)
+ , new_env) }
-- ToDo: when enabling the floatLambda stuff,
-- I think we want to stop doing this
- | isSingleton pairs && count isId abs_vars > 1
+ | [(bndr,rhs)] <- pairs
+ , count isId abs_vars > 1
= do -- Special case for self recursion where there are
- -- several variables carried around: build a local loop:
+ -- several variables carried around: build a local loop:
-- poly_f = \abs_vars. \lam_vars . letrec f = \lam_vars. rhs in f lam_vars
-- This just makes the closures a bit smaller. If we don't do
-- this, allocation rises significantly on some programs
--
-- We could elaborate it for the case where there are several
-- mutually functions, but it's quite a bit more complicated
- --
+ --
-- This all seems a bit ad hoc -- sigh
+ let (rhs_env, abs_vars_w_lvls) = lvlLamBndrs env dest_lvl abs_vars
+ rhs_lvl = le_ctxt_lvl rhs_env
+
+ (rhs_env', [new_bndr]) <- cloneVars Recursive rhs_env rhs_lvl [bndr]
let
- (bndr,rhs) = head pairs
- (rhs_lvl, abs_vars_w_lvls) = lvlLamBndrs dest_lvl abs_vars
- rhs_env = extendLvlEnv env abs_vars_w_lvls
- (rhs_env', new_bndr) <- cloneVar rhs_env bndr rhs_lvl
- let
- (lam_bndrs, rhs_body) = collectAnnBndrs rhs
- (body_lvl, new_lam_bndrs) = lvlLamBndrs rhs_lvl lam_bndrs
- body_env = extendLvlEnv rhs_env' new_lam_bndrs
- new_rhs_body <- lvlExpr body_lvl body_env rhs_body
+ (lam_bndrs, rhs_body) = collectAnnBndrs rhs
+ (body_env1, lam_bndrs1) = substBndrsSL NonRecursive rhs_env' lam_bndrs
+ (body_env2, lam_bndrs2) = lvlLamBndrs body_env1 rhs_lvl lam_bndrs1
+ new_rhs_body <- lvlExpr body_env2 rhs_body
(poly_env, [poly_bndr]) <- newPolyBndrs dest_lvl env abs_vars [bndr]
- return (Rec [(TB poly_bndr (FloatMe dest_lvl)
+ return (Rec [(TB poly_bndr (FloatMe dest_lvl)
, mkLams abs_vars_w_lvls $
- mkLams new_lam_bndrs $
+ mkLams lam_bndrs2 $
Let (Rec [( TB new_bndr (StayPut rhs_lvl)
- , mkLams new_lam_bndrs new_rhs_body)])
- (mkVarApps (Var new_bndr) lam_bndrs))]
- , ctxt_lvl
+ , mkLams lam_bndrs2 new_rhs_body)])
+ (mkVarApps (Var new_bndr) lam_bndrs1))]
, poly_env)
- | otherwise = do -- Non-null abs_vars
- (new_env, new_bndrs) <- newPolyBndrs dest_lvl env abs_vars bndrs
- new_rhss <- mapM (lvlFloatRhs abs_vars dest_lvl new_env) rhss
- return ( Rec ([TB b (FloatMe dest_lvl) | b <- new_bndrs] `zip` new_rhss)
- , ctxt_lvl, new_env)
+ | otherwise -- Non-null abs_vars
+ = do { (new_env, new_bndrs) <- newPolyBndrs dest_lvl env abs_vars bndrs
+ ; new_rhss <- mapM (lvlFloatRhs abs_vars dest_lvl new_env) rhss
+ ; return ( Rec ([TB b (FloatMe dest_lvl) | b <- new_bndrs] `zip` new_rhss)
+ , new_env) }
where
(bndrs,rhss) = unzip pairs
@@ -770,25 +766,24 @@ lvlBind ctxt_lvl env (AnnRec pairs)
`minusVarSet`
mkVarSet bndrs
- dest_lvl = destLevel env bind_fvs (all isFunction rhss) Nothing
+ dest_lvl = destLevel env bind_fvs (all isFunction rhss) False
abs_vars = abstractVars dest_lvl env bind_fvs
-profitableFloat :: Level -> Level -> Bool
-profitableFloat ctxt_lvl dest_lvl
- = (dest_lvl `ltMajLvl` ctxt_lvl) -- Escapes a value lambda
- || isTopLvl dest_lvl -- Going all the way to top level
+profitableFloat :: LevelEnv -> Level -> Bool
+profitableFloat env dest_lvl
+ = (dest_lvl `ltMajLvl` le_ctxt_lvl env) -- Escapes a value lambda
+ || isTopLvl dest_lvl -- Going all the way to top level
----------------------------------------------------
-- Three help functions for the type-abstraction case
-lvlFloatRhs :: [CoreBndr] -> Level -> LevelEnv -> CoreExprWithFVs
+lvlFloatRhs :: [OutVar] -> Level -> LevelEnv -> CoreExprWithFVs
-> UniqSM (Expr LevelledBndr)
-lvlFloatRhs abs_vars dest_lvl env rhs = do
- rhs' <- lvlExpr rhs_lvl rhs_env rhs
- return (mkLams abs_vars_w_lvls rhs')
+lvlFloatRhs abs_vars dest_lvl env rhs
+ = do { rhs' <- lvlExpr rhs_env rhs
+ ; return (mkLams abs_vars_w_lvls rhs') }
where
- (rhs_lvl, abs_vars_w_lvls) = lvlLamBndrs dest_lvl abs_vars
- rhs_env = extendLvlEnv env abs_vars_w_lvls
+ (rhs_env, abs_vars_w_lvls) = lvlLamBndrs env dest_lvl abs_vars
\end{code}
@@ -799,18 +794,27 @@ lvlFloatRhs abs_vars dest_lvl env rhs = do
%************************************************************************
\begin{code}
-lvlLamBndrs :: Level -> [CoreBndr] -> (Level, [LevelledBndr])
+substAndLvlBndrs :: RecFlag -> LevelEnv -> Level -> [InVar] -> (LevelEnv, [LevelledBndr])
+substAndLvlBndrs is_rec env lvl bndrs
+ = lvlBndrs subst_env lvl subst_bndrs
+ where
+ (subst_env, subst_bndrs) = substBndrsSL is_rec env bndrs
+
+substBndrsSL :: RecFlag -> LevelEnv -> [InVar] -> (LevelEnv, [OutVar])
+-- So named only to avoid the name clash with CoreSubst.substBndrs
+substBndrsSL is_rec env@(LE { le_subst = subst, le_env = id_env }) bndrs
+ = ( env { le_subst = subst'
+ , le_env = foldl add_id id_env (bndrs `zip` bndrs') }
+ , bndrs')
+ where
+ (subst', bndrs') = case is_rec of
+ NonRecursive -> substBndrs subst bndrs
+ Recursive -> substRecBndrs subst bndrs
+
+lvlLamBndrs :: LevelEnv -> Level -> [OutVar] -> (LevelEnv, [LevelledBndr])
-- Compute the levels for the binders of a lambda group
--- The binders returned are exactly the same as the ones passed,
--- but they are now paired with a level
-lvlLamBndrs lvl []
- = (lvl, [])
-
-lvlLamBndrs lvl bndrs
- = (new_lvl, [TB bndr (StayPut new_lvl) | bndr <- bndrs])
- -- All the new binders get the same level, because
- -- any floating binding is either going to float past
- -- all or none. We never separate binders
+lvlLamBndrs env lvl bndrs
+ = lvlBndrs env new_lvl bndrs
where
new_lvl | any is_major bndrs = incMajorLvl lvl
| otherwise = incMinorLvl lvl
@@ -818,16 +822,37 @@ lvlLamBndrs lvl bndrs
is_major bndr = isId bndr && not (isProbablyOneShotLambda bndr)
-- The "probably" part says "don't float things out of a
-- probable one-shot lambda"
+
+
+lvlBndrs :: LevelEnv -> Level -> [CoreBndr] -> (LevelEnv, [LevelledBndr])
+-- The binders returned are exactly the same as the ones passed,
+-- apart from applying the substitution, but they are now paired
+-- with a (StayPut level)
+--
+-- The returned envt has ctxt_lvl updated to the new_lvl
+--
+-- All the new binders get the same level, because
+-- any floating binding is either going to float past
+-- all or none. We never separate binders.
+lvlBndrs env@(LE { le_lvl_env = lvl_env }) new_lvl bndrs
+ = ( env { le_ctxt_lvl = new_lvl
+ , le_lvl_env = foldl add_lvl lvl_env bndrs }
+ , lvld_bndrs)
+ where
+ lvld_bndrs = [TB bndr (StayPut new_lvl) | bndr <- bndrs]
+ add_lvl env v = extendVarEnv env v new_lvl
\end{code}
\begin{code}
-- Destination level is the max Id level of the expression
-- (We'll abstract the type variables, if any.)
-destLevel :: LevelEnv -> VarSet -> Bool ->
- Maybe (Arity, StrictSig) -> Level
-destLevel env fvs is_function mb_bot
- | Just {} <- mb_bot = tOP_LEVEL -- Send bottoming bindings to the top
- -- regardless; see Note [Bottoming floats]
+destLevel :: LevelEnv -> VarSet
+ -> Bool -- True <=> is function
+ -> Bool -- True <=> is bottom
+ -> Level
+destLevel env fvs is_function is_bot
+ | is_bot = tOP_LEVEL -- Send bottoming bindings to the top
+ -- regardless; see Note [Bottoming floats]
| Just n_args <- floatLams env
, n_args > 0 -- n=0 case handled uniformly by the 'otherwise' case
, is_function
@@ -874,17 +899,22 @@ countFreeIds = foldVarSet add 0
%************************************************************************
\begin{code}
-data LevelEnv
+type InVar = Var -- Pre cloning
+type InId = Id -- Pre cloning
+type OutVar = Var -- Post cloning
+type OutId = Id -- Post cloning
+
+data LevelEnv
= LE { le_switches :: FloatOutSwitches
+ , le_ctxt_lvl :: Level -- The current level
, le_lvl_env :: VarEnv Level -- Domain is *post-cloned* TyVars and Ids
- , le_subst :: Subst -- Domain is pre-cloned Ids; tracks the in-scope set
- -- so that substitution is capture-avoiding
+ , le_subst :: Subst -- Domain is pre-cloned TyVars and Ids
-- The Id -> CoreExpr in the Subst is ignored
- -- (since we want to substitute in LevelledExpr
- -- instead) but we do use the Co/TyVar substs
- , le_env :: IdEnv ([Var], LevelledExpr) -- Domain is pre-cloned Ids
+ -- (since we want to substitute a LevelledExpr for
+ -- an Id via le_env) but we do use the Co/TyVar substs
+ , le_env :: IdEnv ([OutVar], LevelledExpr) -- Domain is pre-cloned Ids
}
- -- We clone let-bound variables so that they are still
+ -- We clone let- and case-bound variables so that they are still
-- distinct when floated out; hence the le_subst/le_env.
-- (see point 3 of the module overview comment).
-- We also use these envs when making a variable polymorphic
@@ -906,9 +936,12 @@ data LevelEnv
-- The domain of the le_lvl_env is the *post-cloned* Ids
initialEnv :: FloatOutSwitches -> LevelEnv
-initialEnv float_lams
- = LE { le_switches = float_lams, le_lvl_env = emptyVarEnv
- , le_subst = emptySubst, le_env = emptyVarEnv }
+initialEnv float_lams
+ = LE { le_switches = float_lams
+ , le_ctxt_lvl = tOP_LEVEL
+ , le_lvl_env = emptyVarEnv
+ , le_subst = emptySubst
+ , le_env = emptyVarEnv }
floatLams :: LevelEnv -> Maybe Int
floatLams le = floatOutLambdas (le_switches le)
@@ -919,67 +952,20 @@ floatConsts le = floatOutConstants (le_switches le)
floatPAPs :: LevelEnv -> Bool
floatPAPs le = floatOutPartialApplications (le_switches le)
-extendLvlEnv :: LevelEnv -> [LevelledBndr] -> LevelEnv
--- Used when *not* cloning
-extendLvlEnv le@(LE { le_lvl_env = lvl_env, le_subst = subst, le_env = id_env })
- prs
- = le { le_lvl_env = foldl add_lvl lvl_env prs
- , le_subst = foldl del_subst subst prs
- , le_env = foldl del_id id_env prs }
- where
- add_lvl env (TB v s) = extendVarEnv env v (floatSpecLevel s)
- del_subst env (TB v _) = extendInScope env v
- del_id env (TB v _) = delVarEnv env v
- -- We must remove any clone for this variable name in case of
- -- shadowing. This bit me in the following case
- -- (in nofib/real/gg/Spark.hs):
- --
- -- case ds of wild {
- -- ... -> case e of wild {
- -- ... -> ... wild ...
- -- }
- -- }
- --
- -- The inside occurrence of @wild@ was being replaced with @ds@,
- -- incorrectly, because the SubstEnv was still lying around. Ouch!
- -- KSW 2000-07.
+setCtxtLvl :: LevelEnv -> Level -> LevelEnv
+setCtxtLvl env lvl = env { le_ctxt_lvl = lvl }
-- extendCaseBndrLvlEnv adds the mapping case-bndr->scrut-var if it can
-- (see point 4 of the module overview comment)
-extendCaseBndrLvlEnv :: LevelEnv -> Expr LevelledBndr
- -> LevelledBndr -> LevelEnv
-extendCaseBndrLvlEnv le@(LE { le_subst = subst, le_env = id_env })
- (Var scrut_var) (TB case_bndr _)
+extendCaseBndrEnv :: LevelEnv
+ -> Id -- Pre-cloned case binder
+ -> Expr LevelledBndr -- Post-cloned scrutinee
+ -> LevelEnv
+extendCaseBndrEnv le@(LE { le_subst = subst, le_env = id_env })
+ case_bndr (Var scrut_var)
= le { le_subst = extendSubstWithVar subst case_bndr scrut_var
- , le_env = extendVarEnv id_env case_bndr ([scrut_var], ASSERT(not (isCoVar scrut_var)) Var scrut_var) }
-
-extendCaseBndrLvlEnv env _scrut case_bndr
- = extendLvlEnv env [case_bndr]
-
-extendPolyLvlEnv :: Level -> LevelEnv -> [Var] -> [(Var {- :: t -}, Var {- :: mkPiTypes abs_vars t -})] -> LevelEnv
-extendPolyLvlEnv dest_lvl
- le@(LE { le_lvl_env = lvl_env, le_subst = subst, le_env = id_env })
- abs_vars bndr_pairs
- = ASSERT( all (not . isCoVar . fst) bndr_pairs ) -- What would we add to the CoSubst in this case. No easy answer, so avoid floating
- le { le_lvl_env = foldl add_lvl lvl_env bndr_pairs
- , le_subst = foldl add_subst subst bndr_pairs
- , le_env = foldl add_id id_env bndr_pairs }
- where
- add_lvl env (_, v') = extendVarEnv env v' dest_lvl
- add_subst env (v, v') = extendIdSubst env v (mkVarApps (Var v') abs_vars)
- add_id env (v, v') = extendVarEnv env v ((v':abs_vars), mkVarApps (Var v') abs_vars)
-
-extendCloneLvlEnv :: Level -> LevelEnv -> Subst -> [(Var, Var)] -> LevelEnv
-extendCloneLvlEnv lvl le@(LE { le_lvl_env = lvl_env, le_env = id_env })
- new_subst bndr_pairs
- = le { le_lvl_env = foldl add_lvl lvl_env bndr_pairs
- , le_subst = new_subst
- , le_env = foldl add_id id_env bndr_pairs }
- where
- add_lvl env (_, v_cloned) = extendVarEnv env v_cloned lvl
- add_id env (v, v_cloned) = if isTyVar v
- then delVarEnv env v
- else extendVarEnv env v ([v_cloned], ASSERT(not (isCoVar v_cloned)) Var v_cloned)
+ , le_env = add_id id_env (case_bndr, scrut_var) }
+extendCaseBndrEnv env _ _ = env
maxFvLevel :: (Var -> Bool) -> LevelEnv -> VarSet -> Level
maxFvLevel max_me (LE { le_lvl_env = lvl_env, le_env = id_env }) var_set
@@ -1001,17 +987,17 @@ lookupVar le v = case lookupVarEnv (le_env le) v of
Just (_, expr) -> expr
_ -> Var v
-abstractVars :: Level -> LevelEnv -> VarSet -> [Var]
+abstractVars :: Level -> LevelEnv -> VarSet -> [OutVar]
-- Find the variables in fvs, free vars of the target expresion,
-- whose level is greater than the destination level
-- These are the ones we are going to abstract out
-abstractVars dest_lvl (LE { le_lvl_env = lvl_env, le_env = id_env }) fvs
+abstractVars dest_lvl (LE { le_subst = subst, le_lvl_env = lvl_env }) in_fvs
= map zap $ uniq $ sortQuantVars
- [var | fv <- varSetElems fvs
- , var <- varSetElems (absVarsOf id_env fv)
- , abstract_me var ]
+ [out_var | out_fv <- varSetElems (substVarSet subst in_fvs)
+ , out_var <- varSetElems (close out_fv)
+ , abstract_me out_var ]
-- NB: it's important to call abstract_me only on the OutIds the
- -- come from absVarsOf (not on fv, which is an InId)
+ -- come from substVarSet (not on fv, which is an InId)
where
uniq :: [Var] -> [Var]
-- Remove adjacent duplicates; the sort will have brought them together
@@ -1031,21 +1017,8 @@ abstractVars dest_lvl (LE { le_lvl_env = lvl_env, le_env = id_env }) fvs
setIdInfo v vanillaIdInfo
| otherwise = v
-absVarsOf :: IdEnv ([Var], LevelledExpr) -> Var -> VarSet
- -- If f is free in the expression, and f maps to poly_f a b c in the
- -- current substitution, then we must report a b c as candidate type
- -- variables
- --
- -- Also, if x::a is an abstracted variable, then so is a; that is,
- -- we must look in x's type. What's more, if a mentions kind variables,
- -- we must also return those.
-absVarsOf id_env v
- | isId v, Just (abs_vars, _) <- lookupVarEnv id_env v
- = foldr (unionVarSet . close) emptyVarSet abs_vars
- | otherwise
- = close v
- where
- close :: Var -> VarSet -- Result include the input variable itself
+ close :: Var -> VarSet -- Close over variables free in the type
+ -- Result includes the input variable itself
close v = foldVarSet (unionVarSet . close)
(unitVarSet v)
(varTypeTyVars v)
@@ -1060,84 +1033,76 @@ initLvl = initUs_
\begin{code}
-newPolyBndrs :: Level -> LevelEnv -> [Var] -> [Id] -> UniqSM (LevelEnv, [Id])
-newPolyBndrs dest_lvl env abs_vars bndrs = do
- uniqs <- getUniquesM
- let new_bndrs = zipWith mk_poly_bndr bndrs uniqs
- return (extendPolyLvlEnv dest_lvl env abs_vars (bndrs `zip` new_bndrs), new_bndrs)
+newPolyBndrs :: Level -> LevelEnv -> [OutVar] -> [InId] -> UniqSM (LevelEnv, [OutId])
+-- The envt is extended to bind the new bndrs to dest_lvl, but
+-- the ctxt_lvl is unaffected
+newPolyBndrs dest_lvl
+ env@(LE { le_lvl_env = lvl_env, le_subst = subst, le_env = id_env })
+ abs_vars bndrs
+ = ASSERT( all (not . isCoVar) bndrs ) -- What would we add to the CoSubst in this case. No easy answer.
+ do { uniqs <- getUniquesM
+ ; let new_bndrs = zipWith mk_poly_bndr bndrs uniqs
+ bndr_prs = bndrs `zip` new_bndrs
+ env' = env { le_lvl_env = foldl add_lvl lvl_env new_bndrs
+ , le_subst = foldl add_subst subst bndr_prs
+ , le_env = foldl add_id id_env bndr_prs }
+ ; return (env', new_bndrs) }
where
+ add_lvl env v' = extendVarEnv env v' dest_lvl
+ add_subst env (v, v') = extendIdSubst env v (mkVarApps (Var v') abs_vars)
+ add_id env (v, v') = extendVarEnv env v ((v':abs_vars), mkVarApps (Var v') abs_vars)
+
mk_poly_bndr bndr uniq = transferPolyIdInfo bndr abs_vars $ -- Note [transferPolyIdInfo] in Id.lhs
mkSysLocal (mkFastString str) uniq poly_ty
where
str = "poly_" ++ occNameString (getOccName bndr)
- poly_ty = mkPiTypes abs_vars (idType bndr)
+ poly_ty = mkPiTypes abs_vars (substTy subst (idType bndr))
-newLvlVar :: [CoreBndr] -> Type -- Abstract wrt these bndrs
- -> Maybe (Arity, StrictSig) -- Note [Bottoming floats]
+newLvlVar :: LevelledExpr -- The RHS of the new binding
+ -> Bool -- Whether it is bottom
-> LvlM Id
-newLvlVar vars body_ty mb_bot
+newLvlVar lvld_rhs is_bot
= do { uniq <- getUniqueM
- ; return (mkLocalIdWithInfo (mk_name uniq) (mkPiTypes vars body_ty) info) }
+ ; return (add_bot_info (mkLocalId (mk_name uniq) rhs_ty)) }
where
+ add_bot_info var -- We could call annotateBotStr always, but the is_bot
+ -- flag just tells us when we don't need to do so
+ | is_bot = annotateBotStr var (exprBotStrictness_maybe de_tagged_rhs)
+ | otherwise = var
+ de_tagged_rhs = deTagExpr lvld_rhs
+ rhs_ty = exprType de_tagged_rhs
mk_name uniq = mkSystemVarName uniq (mkFastString "lvl")
- arity = count isId vars
- info = case mb_bot of
- Nothing -> vanillaIdInfo
- Just (bot_arity, sig) ->
- vanillaIdInfo
- `setArityInfo` (arity + bot_arity)
- `setStrictnessInfo` (increaseStrictSigArity arity sig)
-
--- The deeply tiresome thing is that we have to apply the substitution
--- to the rules inside each Id. Grr. But it matters.
-
-substLetBndrNonRec :: LevelEnv -> Id -> Level -> (LevelEnv, Id)
-substLetBndrNonRec
- le@(LE { le_lvl_env = lvl_env, le_subst = subst, le_env = id_env })
- bndr bind_lvl
- = ASSERT( isId bndr )
- (env', bndr' )
- where
- (subst', bndr') = substBndr subst bndr
- env' = le { le_lvl_env = extendVarEnv lvl_env bndr bind_lvl
- , le_subst = subst'
- , le_env = delVarEnv id_env bndr }
-
-substLetBndrsRec :: LevelEnv -> [Id] -> Level -> (LevelEnv, [Id])
-substLetBndrsRec
- le@(LE { le_lvl_env = lvl_env, le_subst = subst, le_env = id_env })
- bndrs bind_lvl
- = ASSERT( all isId bndrs )
- (env', bndrs')
+
+cloneVars :: RecFlag -> LevelEnv -> Level -> [Var] -> LvlM (LevelEnv, [Var])
+-- Works for Ids, TyVars and CoVars
+-- The dest_lvl is attributed to the binders in the new env,
+-- but cloneVars doesn't affect the ctxt_lvl of the incoming env
+cloneVars is_rec
+ env@(LE { le_subst = subst, le_lvl_env = lvl_env, le_env = id_env })
+ dest_lvl vs
+ = do { us <- getUniqueSupplyM
+ ; let (subst', vs1) = case is_rec of
+ NonRecursive -> cloneBndrs subst us vs
+ Recursive -> cloneRecIdBndrs subst us vs
+ vs2 = map zap_demand_info vs1 -- See Note [Zapping the demand info]
+ prs = vs `zip` vs2
+ env' = env { le_lvl_env = foldl add_lvl lvl_env vs2
+ , le_subst = subst'
+ , le_env = foldl add_id id_env prs }
+
+ ; return (env', vs2) }
where
- (subst', bndrs') = substRecBndrs subst bndrs
- env' = le { le_lvl_env = extendVarEnvList lvl_env [(b,bind_lvl) | b <- bndrs]
- , le_subst = subst'
- , le_env = delVarEnvList id_env bndrs }
-
-cloneVar :: LevelEnv -> Var -> Level -> LvlM (LevelEnv, Var)
-cloneVar env v dest_lvl -- Works for Ids, TyVars and CoVars
- = do { u <- getUniqueM
- ; let (subst', v1) = cloneBndr (le_subst env) u v
- v2 = if isId v1
- then zapDemandIdInfo v1
- else v1
- env' = extendCloneLvlEnv dest_lvl env subst' [(v,v2)]
- ; return (env', v2) }
-
-cloneVars :: LevelEnv -> [Var] -> Level -> LvlM (LevelEnv, [Var])
-cloneVars env vs dest_lvl = mapAccumLM (\env v -> cloneVar env v dest_lvl) env vs
-
-cloneRecVars :: LevelEnv -> [Id] -> Level -> LvlM (LevelEnv, [Id])
-cloneRecVars env vs dest_lvl -- Works for CoVars too (since cloneRecIdBndrs does)
- = ASSERT( all isId vs ) do
- us <- getUniqueSupplyM
- let
- (subst', vs1) = cloneRecIdBndrs (le_subst env) us vs
- -- Note [Zapping the demand info]
- vs2 = map zapDemandIdInfo vs1
- env' = extendCloneLvlEnv dest_lvl env subst' (vs `zip` vs2)
- return (env', vs2)
+ add_lvl env v_cloned = extendVarEnv env v_cloned dest_lvl
+
+add_id :: IdEnv ([Var], LevelledExpr) -> (Var, Var) -> IdEnv ([Var], LevelledExpr)
+add_id id_env (v, v1)
+ | isTyVar v = delVarEnv id_env v
+ | otherwise = extendVarEnv id_env v ([v1], ASSERT(not (isCoVar v1)) Var v1)
+
+zap_demand_info :: Var -> Var
+zap_demand_info v
+ | isId v = zapDemandIdInfo v
+ | otherwise = v
\end{code}
Note [Zapping the demand info]
@@ -1149,4 +1114,3 @@ binding site. Eg
f x = let v = 3*4 in v+x
Here v is strict; but if we float v to top level, it isn't any more.
-
diff --git a/compiler/simplCore/SimplCore.lhs b/compiler/simplCore/SimplCore.lhs
index c487c984919f..59b39a9c60d7 100644
--- a/compiler/simplCore/SimplCore.lhs
+++ b/compiler/simplCore/SimplCore.lhs
@@ -4,6 +4,8 @@
\section[SimplCore]{Driver for simplifying @Core@ programs}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module SimplCore ( core2core, simplifyExpr ) where
#include "HsVersions.h"
@@ -37,6 +39,7 @@ import SAT ( doStaticArgs )
import Specialise ( specProgram)
import SpecConstr ( specConstrProgram)
import DmdAnal ( dmdAnalProgram )
+import CallArity ( callArityAnalProgram )
import WorkWrap ( wwTopBinds )
import Vectorise ( vectorise )
import FastString
@@ -114,6 +117,7 @@ getCoreToDo dflags
phases = simplPhases dflags
max_iter = maxSimplIterations dflags
rule_check = ruleCheck dflags
+ call_arity = gopt Opt_CallArity dflags
strictness = gopt Opt_Strictness dflags
full_laziness = gopt Opt_FullLaziness dflags
do_specialise = gopt Opt_Specialise dflags
@@ -258,6 +262,11 @@ getCoreToDo dflags
-- Don't stop now!
simpl_phase 0 ["main"] (max max_iter 3),
+ runWhen call_arity $ CoreDoPasses
+ [ CoreDoCallArity
+ , simpl_phase 0 ["post-call-arity"] max_iter
+ ],
+
runWhen strictness demand_analyser,
runWhen full_laziness $
@@ -396,6 +405,9 @@ doCorePass (CoreDoFloatOutwards f) = {-# SCC "FloatOutwards" #-}
doCorePass CoreDoStaticArgs = {-# SCC "StaticArgs" #-}
doPassU doStaticArgs
+doCorePass CoreDoCallArity = {-# SCC "CallArity" #-}
+ doPassD callArityAnalProgram
+
doCorePass CoreDoStrictness = {-# SCC "NewStranal" #-}
doPassDFM dmdAnalProgram
diff --git a/compiler/simplCore/SimplEnv.lhs b/compiler/simplCore/SimplEnv.lhs
index 5f1013def893..71021a0179c8 100644
--- a/compiler/simplCore/SimplEnv.lhs
+++ b/compiler/simplCore/SimplEnv.lhs
@@ -4,6 +4,8 @@
\section[SimplMonad]{The simplifier Monad}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module SimplEnv (
InId, InBind, InExpr, InAlt, InArg, InType, InBndr, InVar,
OutId, OutTyVar, OutBind, OutExpr, OutAlt, OutArg, OutType, OutBndr, OutVar,
@@ -29,8 +31,8 @@ module SimplEnv (
-- Floats
Floats, emptyFloats, isEmptyFloats, addNonRec, addFloats, extendFloats,
- wrapFloats, floatBinds, setFloats, zapFloats, addRecFloats,
- doFloatFromRhs, getFloatBinds, getFloats, mapFloats
+ wrapFloats, setFloats, zapFloats, addRecFloats, mapFloats,
+ doFloatFromRhs, getFloatBinds
) where
#include "HsVersions.h"
@@ -45,7 +47,7 @@ import VarEnv
import VarSet
import OrdList
import Id
-import MkCore
+import MkCore ( mkWildValBinder )
import TysWiredIn
import qualified CoreSubst
import qualified Type
@@ -342,15 +344,21 @@ Note [Simplifier floats]
~~~~~~~~~~~~~~~~~~~~~~~~~
The Floats is a bunch of bindings, classified by a FloatFlag.
+* All of them satisfy the let/app invariant
+
+Examples
+
NonRec x (y:ys) FltLifted
Rec [(x,rhs)] FltLifted
+ NonRec x* (p:q) FltOKSpec -- RHS is WHNF. Question: why not FltLifted?
NonRec x# (y +# 3) FltOkSpec -- Unboxed, but ok-for-spec'n
- NonRec x# (a /# b) FltCareful
NonRec x* (f y) FltCareful -- Strict binding; might fail or diverge
- NonRec x# (f y) FltCareful -- Unboxed binding: might fail or diverge
- -- (where f :: Int -> Int#)
+
+Can't happen:
+ NonRec x# (a /# b) -- Might fail; does not satisfy let/app
+ NonRec x# (f y) -- Might diverge; does not satisfy let/app
\begin{code}
data Floats = Floats (OrdList OutBind) FloatFlag
@@ -386,13 +394,6 @@ andFF FltOkSpec FltCareful = FltCareful
andFF FltOkSpec _ = FltOkSpec
andFF FltLifted flt = flt
-classifyFF :: CoreBind -> FloatFlag
-classifyFF (Rec _) = FltLifted
-classifyFF (NonRec bndr rhs)
- | not (isStrictId bndr) = FltLifted
- | exprOkForSpeculation rhs = FltOkSpec
- | otherwise = FltCareful
-
doFloatFromRhs :: TopLevelFlag -> RecFlag -> Bool -> OutExpr -> SimplEnv -> Bool
-- If you change this function look also at FloatIn.noFloatFromRhs
doFloatFromRhs lvl rec str rhs (SimplEnv {seFloats = Floats fs ff})
@@ -421,8 +422,16 @@ emptyFloats :: Floats
emptyFloats = Floats nilOL FltLifted
unitFloat :: OutBind -> Floats
--- A single-binding float
-unitFloat bind = Floats (unitOL bind) (classifyFF bind)
+-- This key function constructs a singleton float with the right form
+unitFloat bind = Floats (unitOL bind) (flag bind)
+ where
+ flag (Rec {}) = FltLifted
+ flag (NonRec bndr rhs)
+ | not (isStrictId bndr) = FltLifted
+ | exprOkForSpeculation rhs = FltOkSpec -- Unlifted, and lifted but ok-for-spec (eg HNF)
+ | otherwise = ASSERT2( not (isUnLiftedType (idType bndr)), ppr bndr )
+ FltCareful
+ -- Unlifted binders can only be let-bound if exprOkForSpeculation holds
addNonRec :: SimplEnv -> OutId -> OutExpr -> SimplEnv
-- Add a non-recursive binding and extend the in-scope set
@@ -435,13 +444,6 @@ addNonRec env id rhs
env { seFloats = seFloats env `addFlts` unitFloat (NonRec id rhs),
seInScope = extendInScopeSet (seInScope env) id }
-mapFloats :: SimplEnv -> ((Id,CoreExpr) -> (Id,CoreExpr)) -> SimplEnv
-mapFloats env@SimplEnv { seFloats = Floats fs ff } fun
- = env { seFloats = Floats (mapOL app fs) ff }
- where
- app (NonRec b e) = case fun (b,e) of (b',e') -> NonRec b' e'
- app (Rec bs) = Rec (map fun bs)
-
extendFloats :: SimplEnv -> OutBind -> SimplEnv
-- Add these bindings to the floats, and extend the in-scope env too
extendFloats env bind
@@ -475,32 +477,31 @@ addRecFloats env1 env2@(SimplEnv {seFloats = Floats bs ff})
env2 {seFloats = seFloats env1 `addFlts` unitFloat (Rec (flattenBinds (fromOL bs)))}
wrapFloats :: SimplEnv -> OutExpr -> OutExpr
-wrapFloats env expr = wrapFlts (seFloats env) expr
-
-wrapFlts :: Floats -> OutExpr -> OutExpr
--- Wrap the floats around the expression, using case-binding where necessary
-wrapFlts (Floats bs _) body = foldrOL wrap body bs
- where
- wrap (Rec prs) body = Let (Rec prs) body
- wrap (NonRec b r) body = bindNonRec b r body
+-- Wrap the floats around the expression; they should all
+-- satisfy the let/app invariant, so mkLets should do the job just fine
+wrapFloats (SimplEnv {seFloats = Floats bs _}) body
+ = foldrOL Let body bs
getFloatBinds :: SimplEnv -> [CoreBind]
-getFloatBinds env = floatBinds (seFloats env)
-
-getFloats :: SimplEnv -> Floats
-getFloats env = seFloats env
+getFloatBinds (SimplEnv {seFloats = Floats bs _})
+ = fromOL bs
isEmptyFloats :: SimplEnv -> Bool
-isEmptyFloats env = isEmptyFlts (seFloats env)
+isEmptyFloats (SimplEnv {seFloats = Floats bs _})
+ = isNilOL bs
-isEmptyFlts :: Floats -> Bool
-isEmptyFlts (Floats bs _) = isNilOL bs
+-- mapFloats commented out: used only in a commented-out bit of Simplify,
+-- concerning ticks
+--
+mapFloats :: SimplEnv -> ((Id,CoreExpr) -> (Id,CoreExpr)) -> SimplEnv
+mapFloats env@SimplEnv { seFloats = Floats fs ff } fun
+ = env { seFloats = Floats (mapOL app fs) ff }
+ where
+ app (NonRec b e) = case fun (b,e) of (b',e') -> NonRec b' e'
+ app (Rec bs) = Rec (map fun bs)
-floatBinds :: Floats -> [OutBind]
-floatBinds (Floats bs _) = fromOL bs
\end{code}
-
%************************************************************************
%* *
Substitution of Vars
diff --git a/compiler/simplCore/SimplUtils.lhs b/compiler/simplCore/SimplUtils.lhs
index 6c7dcc204218..b2b8ea91893b 100644
--- a/compiler/simplCore/SimplUtils.lhs
+++ b/compiler/simplCore/SimplUtils.lhs
@@ -4,6 +4,8 @@
\section[SimplUtils]{The simplifier utilities}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module SimplUtils (
-- Rebuilding
mkLam, mkCase, prepareAlts, tryEtaExpandRhs,
@@ -60,6 +62,7 @@ import FastString
import Pair
import Control.Monad ( when )
+import Data.List ( partition )
\end{code}
@@ -852,6 +855,10 @@ the former.
\begin{code}
preInlineUnconditionally :: DynFlags -> SimplEnv -> TopLevelFlag -> InId -> InExpr -> Bool
+-- Precondition: rhs satisfies the let/app invariant
+-- See Note [CoreSyn let/app invariant] in CoreSyn
+-- Reason: we don't want to inline single uses, or discard dead bindings,
+-- for unlifted, side-effect-full bindings
preInlineUnconditionally dflags env top_lvl bndr rhs
| not active = False
| isStableUnfolding (idUnfolding bndr) = False -- Note [InlineRule and preInlineUnconditionally]
@@ -894,6 +901,7 @@ preInlineUnconditionally dflags env top_lvl bndr rhs
-- Sadly, not quite the same as exprIsHNF.
canInlineInLam (Lit _) = True
canInlineInLam (Lam b e) = isRuntimeVar b || canInlineInLam e
+ canInlineInLam (Tick t e) = not (tickishIsCode t) && canInlineInLam e
canInlineInLam _ = False
-- not ticks. Counting ticks cannot be duplicated, and non-counting
-- ticks around a Lam will disappear anyway.
@@ -961,6 +969,10 @@ postInlineUnconditionally
-> OutExpr
-> Unfolding
-> Bool
+-- Precondition: rhs satisfies the let/app invariant
+-- See Note [CoreSyn let/app invariant] in CoreSyn
+-- Reason: we don't want to inline single uses, or discard dead bindings,
+-- for unlifted, side-effect-full bindings
postInlineUnconditionally dflags env top_lvl bndr occ_info rhs unfolding
| not active = False
| isWeakLoopBreaker occ_info = False -- If it's a loop-breaker of any kind, don't inline
@@ -1190,15 +1202,14 @@ because the latter is not well-kinded.
\begin{code}
tryEtaExpandRhs :: SimplEnv -> OutId -> OutExpr -> SimplM (Arity, OutExpr)
-- See Note [Eta-expanding at let bindings]
--- and Note [Eta expansion to manifest arity]
tryEtaExpandRhs env bndr rhs
= do { dflags <- getDynFlags
; (new_arity, new_rhs) <- try_expand dflags
- ; WARN( new_arity < old_arity || new_arity < _dmd_arity,
- (ptext (sLit "Arity decrease:") <+> (ppr bndr <+> ppr old_arity
- <+> ppr new_arity <+> ppr _dmd_arity) $$ ppr new_rhs) )
- -- Note [Arity decrease]
+ ; WARN( new_arity < old_id_arity,
+ (ptext (sLit "Arity decrease:") <+> (ppr bndr <+> ppr old_id_arity
+ <+> ppr old_arity <+> ppr new_arity) $$ ppr new_rhs) )
+ -- Note [Arity decrease] in Simplify
return (new_arity, new_rhs) }
where
try_expand dflags
@@ -1206,16 +1217,17 @@ tryEtaExpandRhs env bndr rhs
= return (exprArity rhs, rhs)
| sm_eta_expand (getMode env) -- Provided eta-expansion is on
- , let new_arity = findRhsArity dflags bndr rhs old_arity
- , new_arity > manifest_arity -- And the curent manifest arity isn't enough
+ , let new_arity1 = findRhsArity dflags bndr rhs old_arity
+ new_arity2 = idCallArity bndr
+ new_arity = max new_arity1 new_arity2
+ , new_arity > old_arity -- And the current manifest arity isn't enough
= do { tick (EtaExpansion bndr)
; return (new_arity, etaExpand new_arity rhs) }
| otherwise
- = return (manifest_arity, rhs)
+ = return (old_arity, rhs)
- manifest_arity = manifestArity rhs
- old_arity = idArity bndr
- _dmd_arity = length $ fst $ splitStrictSig $ idStrictness bndr
+ old_arity = exprArity rhs -- See Note [Do not expand eta-expand PAPs]
+ old_id_arity = idArity bndr
\end{code}
Note [Eta-expanding at let bindings]
@@ -1224,7 +1236,7 @@ We now eta expand at let-bindings, which is where the payoff comes.
The most significant thing is that we can do a simple arity analysis
(in CoreArity.findRhsArity), which we can't do for free-floating lambdas
-One useful consequence is this example:
+One useful consequence of not eta-expanding lambdas is this example:
genMap :: C a => ...
{-# INLINE genMap #-}
genMap f xs = ...
@@ -1234,7 +1246,7 @@ One useful consequence is this example:
myMap = genMap
Notice that 'genMap' should only inline if applied to two arguments.
-In the InlineRule for myMap we'll have the unfolding
+In the stable unfolding for myMap we'll have the unfolding
(\d -> genMap Int (..d..))
We do not want to eta-expand to
(\d f xs -> genMap Int (..d..) f xs)
@@ -1242,6 +1254,29 @@ because then 'genMap' will inline, and it really shouldn't: at least
as far as the programmer is concerned, it's not applied to two
arguments!
+Note [Do not eta-expand PAPs]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We used to have old_arity = manifestArity rhs, which meant that we
+would eta-expand even PAPs. But this gives no particular advantage,
+and can lead to a massive blow-up in code size, exhibited by Trac #9020.
+Suppose we have a PAP
+ foo :: IO ()
+ foo = returnIO ()
+Then we can eta-expand do
+ foo = (\eta. (returnIO () |> sym g) eta) |> g
+where
+ g :: IO () ~ State# RealWorld -> (# State# RealWorld, () #)
+
+But there is really no point in doing this, and it generates masses of
+coercions and whatnot that eventually disappear again. For T9020, GHC
+allocated 6.6G beore, and 0.8G afterwards; and residency dropped from
+1.8G to 45M.
+
+But note that this won't eta-expand, say
+ f = \g -> map g
+Does it matter not eta-expanding such functions? I'm not sure. Perhaps
+strictness analysis will have less to bite on?
+
%************************************************************************
%* *
@@ -1553,10 +1588,12 @@ combineIdenticalAlts case_bndr ((_con1,bndrs1,rhs1) : con_alts)
| all isDeadBinder bndrs1 -- Remember the default
, length filtered_alts < length con_alts -- alternative comes first
= do { tick (AltMerge case_bndr)
- ; return ((DEFAULT, [], rhs1) : filtered_alts) }
+ ; return ((DEFAULT, [], mkTicks (concat tickss) rhs1) : filtered_alts) }
where
- filtered_alts = filterOut identical_to_alt1 con_alts
- identical_to_alt1 (_con,bndrs,rhs) = all isDeadBinder bndrs && rhs `cheapEqExpr` rhs1
+ (eliminated_alts, filtered_alts) = partition identical_to_alt1 con_alts
+ cheapEqTicked = cheapEqExpr' tickishFloatable
+ identical_to_alt1 (_con,bndrs,rhs) = all isDeadBinder bndrs && rhs `cheapEqTicked` rhs1
+ tickss = map (fst . stripTicks tickishFloatable . thirdOf3) eliminated_alts
combineIdenticalAlts _ alts = return alts
\end{code}
@@ -1610,7 +1647,8 @@ mkCase, mkCase1, mkCase2
mkCase dflags scrut outer_bndr alts_ty ((DEFAULT, _, deflt_rhs) : outer_alts)
| gopt Opt_CaseMerge dflags
- , Case (Var inner_scrut_var) inner_bndr _ inner_alts <- deflt_rhs
+ , (ticks, Case (Var inner_scrut_var) inner_bndr _ inner_alts)
+ <- stripTicksTop tickishFloatable deflt_rhs
, inner_scrut_var == outer_bndr
= do { tick (CaseMerge outer_bndr)
@@ -1634,7 +1672,8 @@ mkCase dflags scrut outer_bndr alts_ty ((DEFAULT, _, deflt_rhs) : outer_alts)
-- When we merge, we must ensure that e1 takes
-- precedence over e2 as the value for A!
- ; mkCase1 dflags scrut outer_bndr alts_ty merged_alts
+ ; fmap (mkTicks ticks) $
+ mkCase1 dflags scrut outer_bndr alts_ty merged_alts
}
-- Warning: don't call mkCase recursively!
-- Firstly, there's no point, because inner alts have already had
@@ -1649,10 +1688,13 @@ mkCase dflags scrut bndr alts_ty alts = mkCase1 dflags scrut bndr alts_ty alts
--------------------------------------------------
mkCase1 _dflags scrut case_bndr _ alts@((_,_,rhs1) : _) -- Identity case
- | all identity_alt alts
+ | all identity_alt alts'
= do { tick (CaseIdentity case_bndr)
- ; return (re_cast scrut rhs1) }
+ ; return (mkTicks (concat tickss) $
+ re_cast scrut rhs1) }
where
+ tickss = map (fst . stripTicksTop tickishFloatable . thirdOf3) alts
+ alts' = map (third3 (snd . stripTicksTop tickishFloatable)) alts
identity_alt (con, args, rhs) = check_eq rhs con args
check_eq (Cast rhs co) con args = not (any (`elemVarSet` tyCoVarsOfCo co) args)
diff --git a/compiler/simplCore/Simplify.lhs b/compiler/simplCore/Simplify.lhs
index 3873ed3c822b..d75a5a06c81f 100644
--- a/compiler/simplCore/Simplify.lhs
+++ b/compiler/simplCore/Simplify.lhs
@@ -4,6 +4,8 @@
\section[Simplify]{The main module of the simplifier}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module Simplify ( simplTopBinds, simplExpr ) where
#include "HsVersions.h"
@@ -28,7 +30,7 @@ import DataCon ( DataCon, dataConWorkId, dataConRepStrictness
--import TyCon ( isEnumerationTyCon ) -- temporalily commented out. See #8326
import CoreMonad ( Tick(..), SimplifierMode(..) )
import CoreSyn
-import Demand ( StrictSig(..), dmdTypeDepth )
+import Demand ( StrictSig(..), dmdTypeDepth, isStrictDmd )
import PprCore ( pprParendExpr, pprCoreExpr )
import CoreUnfold
import CoreUtils
@@ -219,9 +221,7 @@ simplTopBinds env0 binds0
-- It's rather as if the top-level binders were imported.
-- See note [Glomming] in OccurAnal.
; env1 <- simplRecBndrs env0 (bindersOfBinds binds0)
- ; dflags <- getDynFlags
- ; let dump_flag = dopt Opt_D_verbose_core2core dflags
- ; env2 <- simpl_binds dump_flag env1 binds0
+ ; env2 <- simpl_binds env1 binds0
; freeTick SimplifierDone
; return env2 }
where
@@ -229,16 +229,10 @@ simplTopBinds env0 binds0
-- they should have their fragile IdInfo zapped (notably occurrence info)
-- That's why we run down binds and bndrs' simultaneously.
--
- -- The dump-flag emits a trace for each top-level binding, which
- -- helps to locate the tracing for inlining and rule firing
- simpl_binds :: Bool -> SimplEnv -> [InBind] -> SimplM SimplEnv
- simpl_binds _ env [] = return env
- simpl_binds dump env (bind:binds) = do { env' <- trace_bind dump bind $
- simpl_bind env bind
- ; simpl_binds dump env' binds }
-
- trace_bind True bind = pprTrace "SimplBind" (ppr (bindersOf bind))
- trace_bind False _ = \x -> x
+ simpl_binds :: SimplEnv -> [InBind] -> SimplM SimplEnv
+ simpl_binds env [] = return env
+ simpl_binds env (bind:binds) = do { env' <- simpl_bind env bind
+ ; simpl_binds env' binds }
simpl_bind env (Rec pairs) = simplRecBind env TopLevel pairs
simpl_bind env (NonRec b r) = simplRecOrTopPair env' TopLevel NonRecursive b b' r
@@ -293,12 +287,21 @@ simplRecOrTopPair :: SimplEnv
-> SimplM SimplEnv -- Returns an env that includes the binding
simplRecOrTopPair env top_lvl is_rec old_bndr new_bndr rhs
- = do dflags <- getDynFlags
- -- Check for unconditional inline
- if preInlineUnconditionally dflags env top_lvl old_bndr rhs
+ = do { dflags <- getDynFlags
+ ; trace_bind dflags $
+ if preInlineUnconditionally dflags env top_lvl old_bndr rhs
+ -- Check for unconditional inline
then do tick (PreInlineUnconditionally old_bndr)
return (extendIdSubst env old_bndr (mkContEx env rhs))
- else simplLazyBind env top_lvl is_rec old_bndr new_bndr rhs env
+ else simplLazyBind env top_lvl is_rec old_bndr new_bndr rhs env }
+ where
+ trace_bind dflags thing_inside
+ | not (dopt Opt_D_verbose_core2core dflags)
+ = thing_inside
+ | otherwise
+ = pprTrace "SimplBind" (ppr old_bndr) thing_inside
+ -- trace_bind emits a trace for each top-level binding, which
+ -- helps to locate the tracing for inlining and rule firing
\end{code}
@@ -323,19 +326,21 @@ simplLazyBind :: SimplEnv
-- The OutId has IdInfo, except arity, unfolding
-> InExpr -> SimplEnv -- The RHS and its environment
-> SimplM SimplEnv
-
+-- Precondition: rhs obeys the let/app invariant
simplLazyBind env top_lvl is_rec bndr bndr1 rhs rhs_se
= -- pprTrace "simplLazyBind" ((ppr bndr <+> ppr bndr1) $$ ppr rhs $$ ppr (seIdSubst rhs_se)) $
do { let rhs_env = rhs_se `setInScope` env
(tvs, body) = case collectTyBinders rhs of
(tvs, body) | not_lam body -> (tvs,body)
| otherwise -> ([], rhs)
- not_lam (Lam _ _) = False
- not_lam _ = True
+ not_lam (Lam _ _) = False
+ not_lam (Tick t e) | not (tickishFloatable t)
+ = not_lam e -- eta-reduction could float: ignore
+ not_lam _ = True
-- Do not do the "abstract tyyvar" thing if there's
-- a lambda inside, because it defeats eta-reduction
-- f = /\a. \x. g a x
- -- should eta-reduce
+ -- should eta-reduce.
; (body_env, tvs') <- simplBinders rhs_env tvs
@@ -375,11 +380,12 @@ simplNonRecX :: SimplEnv
-> InId -- Old binder
-> OutExpr -- Simplified RHS
-> SimplM SimplEnv
-
+-- Precondition: rhs satisfies the let/app invariant
simplNonRecX env bndr new_rhs
| isDeadBinder bndr -- Not uncommon; e.g. case (a,b) of c { (p,q) -> p }
- = return env -- Here c is dead, and we avoid creating
- -- the binding c = (a,b)
+ = return env -- Here c is dead, and we avoid creating
+ -- the binding c = (a,b)
+
| Coercion co <- new_rhs
= return (extendCvSubst env bndr co)
@@ -394,6 +400,8 @@ completeNonRecX :: TopLevelFlag -> SimplEnv
-> OutId -- New binder
-> OutExpr -- Simplified RHS
-> SimplM SimplEnv
+-- Precondition: rhs satisfies the let/app invariant
+-- See Note [CoreSyn let/app invariant] in CoreSyn
completeNonRecX top_lvl env is_strict old_bndr new_bndr new_rhs
= do { (env1, rhs1) <- prepareRhs top_lvl (zapFloats env) new_bndr new_rhs
@@ -483,6 +491,20 @@ prepareRhs top_lvl env0 _ rhs0
-- The definition of is_exp should match that in
-- OccurAnal.occAnalApp
+ go n_val_args env (Tick t rhs)
+ -- We want to be able to float bindings past this
+ -- tick. Non-scoping ticks don't care.
+ | tickishScoped t == NoScope
+ = do { (is_exp, env', rhs') <- go n_val_args env rhs
+ ; return (is_exp, env', Tick t rhs') }
+ -- On the other hand, for scoping ticks we need to be able to
+ -- copy them on the floats, which in turn is only allowed if
+ -- we can obtain non-counting ticks.
+ | not (tickishCounts t) || tickishCanSplit t
+ = do { (is_exp, env', rhs') <- go n_val_args env rhs
+ ; let tickIt (id, expr) = (id, mkTick (mkNoCount t) expr)
+ ; return (is_exp, mapFloats env' tickIt, Tick t rhs') }
+
go _ env other
= return (False, env, other)
\end{code}
@@ -641,7 +663,8 @@ completeBind :: SimplEnv
-- completeBind may choose to do its work
-- * by extending the substitution (e.g. let x = y in ...)
-- * or by adding to the floats in the envt
-
+--
+-- Precondition: rhs obeys the let/app invariant
completeBind env top_lvl old_bndr new_bndr new_rhs
| isCoVar old_bndr
= case new_rhs of
@@ -730,53 +753,51 @@ simplUnfolding :: SimplEnv-> TopLevelFlag
-> OutExpr
-> Unfolding -> SimplM Unfolding
-- Note [Setting the new unfolding]
-simplUnfolding env _ _ _ df@(DFunUnfolding { df_bndrs = bndrs, df_args = args })
- = do { (env', bndrs') <- simplBinders env bndrs
- ; args' <- mapM (simplExpr env') args
- ; return (df { df_bndrs = bndrs', df_args = args' }) }
-
-simplUnfolding env top_lvl id _
- (CoreUnfolding { uf_tmpl = expr, uf_arity = arity
- , uf_src = src, uf_guidance = guide })
- | isStableSource src
- = do { expr' <- simplExpr rule_env expr
- ; let is_top_lvl = isTopLevel top_lvl
- ; case guide of
- UnfWhen sat_ok _ -- Happens for INLINE things
- -> let guide' = UnfWhen sat_ok (inlineBoringOk expr')
- -- Refresh the boring-ok flag, in case expr'
- -- has got small. This happens, notably in the inlinings
- -- for dfuns for single-method classes; see
- -- Note [Single-method classes] in TcInstDcls.
- -- A test case is Trac #4138
- in return (mkCoreUnfolding src is_top_lvl expr' arity guide')
- -- See Note [Top-level flag on inline rules] in CoreUnfold
-
- _other -- Happens for INLINABLE things
- -> let bottoming = isBottomingId id
- in bottoming `seq` -- See Note [Force bottoming field]
- do dflags <- getDynFlags
- return (mkUnfolding dflags src is_top_lvl bottoming expr')
+simplUnfolding env top_lvl id new_rhs unf
+ = case unf of
+ DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = args }
+ -> do { (env', bndrs') <- simplBinders rule_env bndrs
+ ; args' <- mapM (simplExpr env') args
+ ; return (mkDFunUnfolding bndrs' con args') }
+
+ CoreUnfolding { uf_tmpl = expr, uf_arity = arity
+ , uf_src = src, uf_guidance = guide }
+ | isStableSource src
+ -> do { expr' <- simplExpr rule_env expr
+ ; case guide of
+ UnfWhen sat_ok _ -- Happens for INLINE things
+ -> let guide' = UnfWhen sat_ok (inlineBoringOk expr')
+ -- Refresh the boring-ok flag, in case expr'
+ -- has got small. This happens, notably in the inlinings
+ -- for dfuns for single-method classes; see
+ -- Note [Single-method classes] in TcInstDcls.
+ -- A test case is Trac #4138
+ in return (mkCoreUnfolding src is_top_lvl expr' arity guide')
+ -- See Note [Top-level flag on inline rules] in CoreUnfold
+
+ _other -- Happens for INLINABLE things
+ -> bottoming `seq` -- See Note [Force bottoming field]
+ do { dflags <- getDynFlags
+ ; return (mkUnfolding dflags src is_top_lvl bottoming expr') } }
-- If the guidance is UnfIfGoodArgs, this is an INLINABLE
-- unfolding, and we need to make sure the guidance is kept up
-- to date with respect to any changes in the unfolding.
- }
+
+ _other -> bottoming `seq` -- See Note [Force bottoming field]
+ do { dflags <- getDynFlags
+ ; return (mkUnfolding dflags InlineRhs is_top_lvl bottoming new_rhs) }
+ -- We make an unfolding *even for loop-breakers*.
+ -- Reason: (a) It might be useful to know that they are WHNF
+ -- (b) In TidyPgm we currently assume that, if we want to
+ -- expose the unfolding then indeed we *have* an unfolding
+ -- to expose. (We could instead use the RHS, but currently
+ -- we don't.) The simple thing is always to have one.
where
+ bottoming = isBottomingId id
+ is_top_lvl = isTopLevel top_lvl
act = idInlineActivation id
rule_env = updMode (updModeForInlineRules act) env
-- See Note [Simplifying inside InlineRules] in SimplUtils
-
-simplUnfolding _ top_lvl id new_rhs _
- = let bottoming = isBottomingId id
- in bottoming `seq` -- See Note [Force bottoming field]
- do dflags <- getDynFlags
- return (mkUnfolding dflags InlineRhs (isTopLevel top_lvl) bottoming new_rhs)
- -- We make an unfolding *even for loop-breakers*.
- -- Reason: (a) It might be useful to know that they are WHNF
- -- (b) In TidyPgm we currently assume that, if we want to
- -- expose the unfolding then indeed we *have* an unfolding
- -- to expose. (We could instead use the RHS, but currently
- -- we don't.) The simple thing is always to have one.
\end{code}
Note [Force bottoming field]
@@ -1024,58 +1045,50 @@ simplTick env tickish expr cont
-- | tickishScoped tickish && not (tickishCounts tickish)
-- = simplExprF env expr (TickIt tickish cont)
- -- For non-scoped ticks, we push the continuation inside the
- -- tick. This has the effect of moving the tick to the outside of a
- -- case or application context, allowing the normal case and
- -- application optimisations to fire.
- | not (tickishScoped tickish)
+ -- For unscoped or soft-scoped ticks, we are allowed to float in new
+ -- cost, so we simply push the continuation inside the tick. This
+ -- has the effect of moving the tick to the outside of a case or
+ -- application context, allowing the normal case and application
+ -- optimisations to fire.
+ | tickish `tickishScopesLike` SoftScope
= do { (env', expr') <- simplExprF env expr cont
; return (env', mkTick tickish expr')
}
- -- For breakpoints, we cannot do any floating of bindings around the
- -- tick, because breakpoints cannot be split into tick/scope pairs.
- | not (tickishCanSplit tickish)
- = no_floating_past_tick
-
- | interesting_cont, Just expr' <- push_tick_inside tickish expr
- -- see Note [case-of-scc-of-case]
+ -- Push tick inside if the context looks like this will allow us to
+ -- do a case-of-case - see Note [case-of-scc-of-case]
+ | interesting_cont, Just expr' <- push_tick_inside
= simplExprF env expr' cont
+ -- We don't want to move the tick, but we might still want to allow
+ -- floats to pass through with appropriate wrapping (or not, see
+ -- wrap_floats below)
+ --- | not (tickishCounts tickish) || tickishCanSplit tickish
+ -- = wrap_floats
+
| otherwise
- = no_floating_past_tick -- was: wrap_floats, see below
+ = no_floating_past_tick
where
interesting_cont = case cont of
Select {} -> True
_ -> False
- push_tick_inside t expr0
- = ASSERT(tickishScoped t)
- case expr0 of
- Tick t' expr
- -- scc t (tick t' E)
- -- Pull the tick to the outside
- -- This one is important for #5363
- | not (tickishScoped t')
- -> Just (Tick t' (Tick t expr))
-
- -- scc t (scc t' E)
- -- Try to push t' into E first, and if that works,
- -- try to push t in again
- | Just expr' <- push_tick_inside t' expr
- -> push_tick_inside t expr'
-
- | otherwise -> Nothing
-
- Case scrut bndr ty alts
- | not (tickishCanSplit t) -> Nothing
- | otherwise -> Just (Case (mkTick t scrut) bndr ty alts')
- where t_scope = mkNoCount t -- drop the tick on the dup'd ones
- alts' = [ (c,bs, mkTick t_scope e) | (c,bs,e) <- alts]
-
- _other -> Nothing
- where
+ -- Try to push tick inside a case, see Note [case-of-scc-of-case].
+ push_tick_inside =
+ case expr0 of
+ Case scrut bndr ty alts
+ -> Just $ Case (tickScrut scrut) bndr ty (map tickAlt alts)
+ _other -> Nothing
+ where (ticks, expr0) = stripTicksTop movable (Tick tickish expr)
+ movable t = not (tickishCounts t) ||
+ t `tickishScopesLike` NoScope ||
+ tickishCanSplit t
+ tickScrut e = foldr mkTick e ticks
+ -- Alternatives get annotated with all ticks that scope in some way,
+ -- but we don't want to count entries.
+ tickAlt (c,bs,e) = (c,bs, foldr mkTick e ts_scope)
+ ts_scope = map mkNoCount $ filter (not . (`tickishScopesLike` NoScope)) ticks
no_floating_past_tick =
do { let (inc,outc) = splitCont cont
@@ -1176,6 +1189,8 @@ rebuild env expr cont
Select _ bndr alts se cont -> rebuildCase (se `setFloats` env) expr bndr alts cont
StrictArg info _ cont -> rebuildCall env (info `addArgTo` expr) cont
StrictBind b bs body se cont -> do { env' <- simplNonRecX (se `setFloats` env) b expr
+ -- expr satisfies let/app since it started life
+ -- in a call to simplNonRecE
; simplLam env' bs body cont }
ApplyTo dup_flag arg se cont -- See Note [Avoid redundant simplification]
| isSimplified dup_flag -> rebuild env (App expr arg) cont
@@ -1326,6 +1341,9 @@ simplNonRecE :: SimplEnv
-- It deals with strict bindings, via the StrictBind continuation,
-- which may abort the whole process
--
+-- Precondition: rhs satisfies the let/app invariant
+-- Note [CoreSyn let/app invariant] in CoreSyn
+--
-- The "body" of the binding comes as a pair of ([InId],InExpr)
-- representing a lambda; so we recurse back to simplLam
-- Why? Because of the binder-occ-info-zapping done before
@@ -1341,22 +1359,21 @@ simplNonRecE env bndr (Type ty_arg, rhs_se) (bndrs, body) cont
simplNonRecE env bndr (rhs, rhs_se) (bndrs, body) cont
= do dflags <- getDynFlags
case () of
- _
- | preInlineUnconditionally dflags env NotTopLevel bndr rhs ->
- do { tick (PreInlineUnconditionally bndr)
- ; -- pprTrace "preInlineUncond" (ppr bndr <+> ppr rhs) $
+ _ | preInlineUnconditionally dflags env NotTopLevel bndr rhs
+ -> do { tick (PreInlineUnconditionally bndr)
+ ; -- pprTrace "preInlineUncond" (ppr bndr <+> ppr rhs) $
simplLam (extendIdSubst env bndr (mkContEx rhs_se rhs)) bndrs body cont }
- | isStrictId bndr -> -- Includes coercions
- do { simplExprF (rhs_se `setFloats` env) rhs
- (StrictBind bndr bndrs body env cont) }
+ | isStrictId bndr -- Includes coercions
+ -> simplExprF (rhs_se `setFloats` env) rhs
+ (StrictBind bndr bndrs body env cont)
- | otherwise ->
- ASSERT( not (isTyVar bndr) )
- do { (env1, bndr1) <- simplNonRecBndr env bndr
- ; let (env2, bndr2) = addBndrRules env1 bndr bndr1
- ; env3 <- simplLazyBind env2 NotTopLevel NonRecursive bndr bndr2 rhs rhs_se
- ; simplLam env3 bndrs body cont }
+ | otherwise
+ -> ASSERT( not (isTyVar bndr) )
+ do { (env1, bndr1) <- simplNonRecBndr env bndr
+ ; let (env2, bndr2) = addBndrRules env1 bndr bndr1
+ ; env3 <- simplLazyBind env2 NotTopLevel NonRecursive bndr bndr2 rhs rhs_se
+ ; simplLam env3 bndrs body cont }
\end{code}
%************************************************************************
@@ -1699,26 +1716,36 @@ This includes things like (==# a# b#)::Bool so that we simplify
to just
x
This particular example shows up in default methods for
-comparision operations (e.g. in (>=) for Int.Int32)
+comparison operations (e.g. in (>=) for Int.Int32)
Note [Case elimination: lifted case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-We also make sure that we deal with this very common case,
-where x has a lifted type:
+If a case over a lifted type has a single alternative, and is being used
+as a strict 'let' (all isDeadBinder bndrs), we may want to do this
+transformation:
- case e of
- x -> ...x...
+ case e of r ===> let r = e in ...r...
+ _ -> ...r...
-Here we are using the case as a strict let; if x is used only once
-then we want to inline it. We have to be careful that this doesn't
-make the program terminate when it would have diverged before, so we
-check that
(a) 'e' is already evaluated (it may so if e is a variable)
- Specifically we check (exprIsHNF e)
+ Specifically we check (exprIsHNF e). In this case
+ we can just allocate the WHNF directly with a let.
or
(b) 'x' is not used at all and e is ok-for-speculation
+ The ok-for-spec bit checks that we don't lose any
+ exceptions or divergence.
+
+ NB: it'd be *sound* to switch from case to let if the
+ scrutinee was not yet WHNF but was guaranteed to
+ converge; but sticking with case means we won't build a
+ thunk
+
+or
+ (c) 'x' is used strictly in the body, and 'e' is a variable
+ Then we can just substitute 'e' for 'x' in the body.
+ See Note [Eliminating redundant seqs]
-For the (b), consider
+For (b), the "not used at all" test is important. Consider
case (case a ># b of { True -> (p,q); False -> (q,p) }) of
r -> blah
The scrutinee is ok-for-speculation (it looks inside cases), but we do
@@ -1727,33 +1754,42 @@ not want to transform to
in blah
because that builds an unnecessary thunk.
-Note [Case binder next]
-~~~~~~~~~~~~~~~~~~~~~~~
-If we have
- case e of f { _ -> f e1 e2 }
-then we can safely do CaseElim. The main criterion is that the
-case-binder is evaluated *next*. Previously we just asked that
-the case-binder is used strictly; but that can change
+Note [Eliminating redundant seqs]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+If we have this:
+ case x of r { _ -> ..r.. }
+where 'r' is used strictly in (..r..), the case is effectively a 'seq'
+on 'x', but since 'r' is used strictly anyway, we can safely transform to
+ (...x...)
+
+Note that this can change the error behaviour. For example, we might
+transform
case x of { _ -> error "bad" }
--> error "bad"
-which is very puzzling if 'x' is later bound to (error "good").
-Where the order of evaluation is specified (via seq or case)
-we should respect it.
+which might be puzzling if 'x' is currently lambda-bound, but later gets
+let-bound to (error "good").
+
+Nevertheless, the paper "A semantics for imprecise exceptions" allows
+this transformation. If you want to fix the evaluation order, use
+'pseq'. See Trac #8900 for an example where the loss of this
+transformation bit us in practice.
+
See also Note [Empty case alternatives] in CoreSyn.
-So instead we use case_bndr_evald_next to see when f is the *next*
-thing to be eval'd. This came up when fixing Trac #7542.
-See also Note [Eta reduction of an eval'd function] in CoreUtils.
+Just for reference, the original code (added Jan 13) looked like this:
+ || case_bndr_evald_next rhs
+
+ case_bndr_evald_next :: CoreExpr -> Bool
+ -- See Note [Case binder next]
+ case_bndr_evald_next (Var v) = v == case_bndr
+ case_bndr_evald_next (Cast e _) = case_bndr_evald_next e
+ case_bndr_evald_next (App e _) = case_bndr_evald_next e
+ case_bndr_evald_next (Case e _ _ _) = case_bndr_evald_next e
+ case_bndr_evald_next _ = False
- For reference, the old code was an extra disjunct in elim_lifted
- || (strict_case_bndr && scrut_is_var scrut)
- strict_case_bndr = isStrictDmd (idDemandInfo case_bndr)
- scrut_is_var (Cast s _) = scrut_is_var s
- scrut_is_var (Var _) = True
- scrut_is_var _ = False
+(This came up when fixing Trac #7542. See also Note [Eta reduction of
+an eval'd function] in CoreUtils.)
- -- True if evaluation of the case_bndr is the next
- -- thing to be eval'd. Then dropping the case
Note [Case elimination: unlifted case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1849,6 +1885,8 @@ rebuildCase env scrut case_bndr alts cont
where
simple_rhs bs rhs = ASSERT( null bs )
do { env' <- simplNonRecX env case_bndr scrut
+ -- scrut is a constructor application,
+ -- hence satisfies let/app invariant
; simplExprF env' rhs cont }
@@ -1856,58 +1894,41 @@ rebuildCase env scrut case_bndr alts cont
-- 2. Eliminate the case if scrutinee is evaluated
--------------------------------------------------
-rebuildCase env scrut case_bndr [(_, bndrs, rhs)] cont
+rebuildCase env scrut case_bndr alts@[(_, bndrs, rhs)] cont
-- See if we can get rid of the case altogether
-- See Note [Case elimination]
-- mkCase made sure that if all the alternatives are equal,
-- then there is now only one (DEFAULT) rhs
- | all isDeadBinder bndrs -- bndrs are [InId]
-
- , if isUnLiftedType (idType case_bndr)
- then elim_unlifted -- Satisfy the let-binding invariant
- else elim_lifted
- = do { -- pprTrace "case elim" (vcat [ppr case_bndr, ppr (exprIsHNF scrut),
- -- ppr ok_for_spec,
- -- ppr scrut]) $
- tick (CaseElim case_bndr)
- ; env' <- simplNonRecX env case_bndr scrut
- -- If case_bndr is dead, simplNonRecX will discard
- ; simplExprF env' rhs cont }
- where
- elim_lifted -- See Note [Case elimination: lifted case]
- = exprIsHNF scrut
- || (is_plain_seq && ok_for_spec)
- -- Note: not the same as exprIsHNF
- || case_bndr_evald_next rhs
-
- elim_unlifted
- | is_plain_seq = exprOkForSideEffects scrut
- -- The entire case is dead, so we can drop it,
- -- _unless_ the scrutinee has side effects
- | otherwise = ok_for_spec
- -- The case-binder is alive, but we may be able
- -- turn the case into a let, if the expression is ok-for-spec
- -- See Note [Case elimination: unlifted case]
-
- ok_for_spec = exprOkForSpeculation scrut
- is_plain_seq = isDeadBinder case_bndr -- Evaluation *only* for effect
- case_bndr_evald_next :: CoreExpr -> Bool
- -- See Note [Case binder next]
- case_bndr_evald_next (Var v) = v == case_bndr
- case_bndr_evald_next (Cast e _) = case_bndr_evald_next e
- case_bndr_evald_next (App e _) = case_bndr_evald_next e
- case_bndr_evald_next (Case e _ _ _) = case_bndr_evald_next e
- case_bndr_evald_next _ = False
- -- Could add a case for Let,
- -- but I'm worried it could become expensive
-
---------------------------------------------------
--- 3. Try seq rules; see Note [User-defined RULES for seq] in MkId
---------------------------------------------------
-
-rebuildCase env scrut case_bndr alts@[(_, bndrs, rhs)] cont
- | all isDeadBinder (case_bndr : bndrs) -- So this is just 'seq'
+ -- 2a. Dropping the case altogether, if
+ -- a) it binds nothing (so it's really just a 'seq')
+ -- b) evaluating the scrutinee has no side effects
+ | is_plain_seq
+ , exprOkForSideEffects scrut
+ -- The entire case is dead, so we can drop it
+ -- if the scrutinee converges without having imperative
+ -- side effects or raising a Haskell exception
+ -- See Note [PrimOp can_fail and has_side_effects] in PrimOp
+ = simplExprF env rhs cont
+
+ -- 2b. Turn the case into a let, if
+ -- a) it binds only the case-binder
+ -- b) unlifted case: the scrutinee is ok-for-speculation
+ -- lifted case: the scrutinee is in HNF (or will later be demanded)
+ | all_dead_bndrs
+ , if is_unlifted
+ then exprOkForSpeculation scrut -- See Note [Case elimination: unlifted case]
+ else exprIsHNF scrut -- See Note [Case elimination: lifted case]
+ || scrut_is_demanded_var scrut
+ = do { tick (CaseElim case_bndr)
+ ; env' <- simplNonRecX env case_bndr scrut
+ ; simplExprF env' rhs cont }
+
+ -- 2c. Try the seq rules if
+ -- a) it binds only the case binder
+ -- b) a rule for seq applies
+ -- See Note [User-defined RULES for seq] in MkId
+ | is_plain_seq
= do { let rhs' = substExpr (text "rebuild-case") env rhs
env' = zapSubstEnv env
out_args = [Type (substTy env (idType case_bndr)),
@@ -1919,6 +1940,17 @@ rebuildCase env scrut case_bndr alts@[(_, bndrs, rhs)] cont
; case mb_rule of
Just (rule_rhs, cont') -> simplExprF env' rule_rhs cont'
Nothing -> reallyRebuildCase env scrut case_bndr alts cont }
+ where
+ is_unlifted = isUnLiftedType (idType case_bndr)
+ all_dead_bndrs = all isDeadBinder bndrs -- bndrs are [InId]
+ is_plain_seq = all_dead_bndrs && isDeadBinder case_bndr -- Evaluation *only* for effect
+
+ scrut_is_demanded_var :: CoreExpr -> Bool
+ -- See Note [Eliminating redundant seqs]
+ scrut_is_demanded_var (Cast s _) = scrut_is_demanded_var s
+ scrut_is_demanded_var (Var _) = isStrictDmd (idDemandInfo case_bndr)
+ scrut_is_demanded_var _ = False
+
rebuildCase env scrut case_bndr alts cont
= reallyRebuildCase env scrut case_bndr alts cont
@@ -2255,7 +2287,7 @@ knownCon env scrut dc dc_ty_args dc_args bndr bs rhs cont
-- it via postInlineUnconditionally.
-- Nevertheless we must keep it if the case-binder is alive,
-- because it may be used in the con_app. See Note [knownCon occ info]
- ; env'' <- simplNonRecX env' b' arg
+ ; env'' <- simplNonRecX env' b' arg -- arg satisfies let/app invariant
; bind_args env'' bs' args }
bind_args _ _ _ =
diff --git a/compiler/simplStg/SimplStg.lhs b/compiler/simplStg/SimplStg.lhs
index c43b6526b5fc..4d33e3392eb9 100644
--- a/compiler/simplStg/SimplStg.lhs
+++ b/compiler/simplStg/SimplStg.lhs
@@ -4,6 +4,8 @@
\section[SimplStg]{Driver for simplifying @STG@ programs}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module SimplStg ( stg2stg ) where
#include "HsVersions.h"
diff --git a/compiler/simplStg/StgStats.lhs b/compiler/simplStg/StgStats.lhs
index 542449546890..9bfe12442f82 100644
--- a/compiler/simplStg/StgStats.lhs
+++ b/compiler/simplStg/StgStats.lhs
@@ -21,6 +21,8 @@ The program gather statistics about
\end{enumerate}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module StgStats ( showStgStats ) where
#include "HsVersions.h"
@@ -153,8 +155,7 @@ statExpr (StgApp _ _) = countOne Applications
statExpr (StgLit _) = countOne Literals
statExpr (StgConApp _ _) = countOne ConstructorApps
statExpr (StgOpApp _ _ _) = countOne PrimitiveApps
-statExpr (StgSCC _ _ _ e) = statExpr e
-statExpr (StgTick _ _ e) = statExpr e
+statExpr (StgTick _ e) = statExpr e
statExpr (StgLetNoEscape _ _ binds body)
= statBinding False{-not top-level-} binds `combineSE`
diff --git a/compiler/simplStg/UnariseStg.lhs b/compiler/simplStg/UnariseStg.lhs
index b1717ad12064..535918ea7bda 100644
--- a/compiler/simplStg/UnariseStg.lhs
+++ b/compiler/simplStg/UnariseStg.lhs
@@ -27,6 +27,8 @@ which is the Arity taking into account any expanded arguments, and corresponds t
the number of (possibly-void) *registers* arguments will arrive in.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module UnariseStg (unarise) where
#include "HsVersions.h"
@@ -128,10 +130,8 @@ unariseExpr us rho (StgLetNoEscape live_in_let live_in_bind bind e)
where
(us1, us2) = splitUniqSupply us
-unariseExpr us rho (StgSCC cc bump_entry push_cc e)
- = StgSCC cc bump_entry push_cc (unariseExpr us rho e)
-unariseExpr us rho (StgTick mod tick_n e)
- = StgTick mod tick_n (unariseExpr us rho e)
+unariseExpr us rho (StgTick tick e)
+ = StgTick tick (unariseExpr us rho e)
------------------------
unariseAlts :: UniqSupply -> UnariseEnv -> AltType -> Id -> RepType -> [StgAlt] -> (AltType, [StgAlt])
diff --git a/compiler/specialise/Rules.lhs b/compiler/specialise/Rules.lhs
index b88888c96c93..084fc71f177c 100644
--- a/compiler/specialise/Rules.lhs
+++ b/compiler/specialise/Rules.lhs
@@ -4,6 +4,8 @@
\section[CoreRules]{Transformation rules}
\begin{code}
+{-# LANGUAGE CPP #-}
+
-- | Functions for collecting together and applying rewrite rules to a module.
-- The 'CoreRule' datatype itself is declared elsewhere.
module Rules (
@@ -33,7 +35,7 @@ import CoreSyn -- All of it
import CoreSubst
import OccurAnal ( occurAnalyseExpr )
import CoreFVs ( exprFreeVars, exprsFreeVars, bindFreeVars, rulesFreeVars )
-import CoreUtils ( exprType, eqExpr )
+import CoreUtils ( exprType, eqExpr, mkTick, stripTicksTop )
import PprCore ( pprRules )
import Type ( Type )
import TcType ( tcSplitTyConApp_maybe )
@@ -192,6 +194,8 @@ roughTopName (App f _) = roughTopName f
roughTopName (Var f) | isGlobalId f -- Note [Care with roughTopName]
, isDataConWorkId f || idArity f > 0
= Just (idName f)
+roughTopName (Tick t e) | tickishFloatable t
+ = roughTopName e
roughTopName _ = Nothing
ruleCantMatch :: [Maybe Name] -> [Maybe Name] -> Bool
@@ -474,8 +478,10 @@ matchRule :: DynFlags -> InScopeEnv -> (Activation -> Bool)
matchRule dflags rule_env _is_active fn args _rough_args
(BuiltinRule { ru_try = match_fn })
-- Built-in rules can't be switched off, it seems
- = case match_fn dflags rule_env fn args of
- Just expr -> Just expr
+ = let -- See Note [Tick annotations in RULE matching]
+ (tickss, args') = unzip $ map (stripTicksTop tickishFloatable) args
+ in case match_fn dflags rule_env fn args' of
+ Just expr -> Just $ foldr mkTick expr (concat tickss)
Nothing -> Nothing
matchRule _ in_scope is_active _ args rough_args
@@ -578,6 +584,9 @@ data RuleMatchEnv
, rv_unf :: IdUnfoldingFun
}
+rvInScopeEnv :: RuleMatchEnv -> InScopeEnv
+rvInScopeEnv renv = (rnInScopeSet (rv_lcl renv), rv_unf renv)
+
data RuleSubst = RS { rs_tv_subst :: TvSubstEnv -- Range is the
, rs_id_subst :: IdSubstEnv -- template variables
, rs_binds :: BindWrapper -- Floated bindings
@@ -607,6 +616,12 @@ match :: RuleMatchEnv
-> CoreExpr -- Target
-> Maybe RuleSubst
+-- We look through certain ticks. See note [Tick annotations in RULE matching]
+match renv subst e1 (Tick t e2)
+ | tickishFloatable t
+ = match renv subst' e1 e2
+ where subst' = subst { rs_binds = rs_binds subst . mkTick t }
+
-- See the notes with Unify.match, which matches types
-- Everything is very similar for terms
@@ -638,7 +653,8 @@ match renv subst e1 (Var v2) -- Note [Expanding variables]
-- because of the not-inRnEnvR
match renv subst e1 (Let bind e2)
- | okToFloat (rv_lcl renv) (bindFreeVars bind) -- See Note [Matching lets]
+ | -- pprTrace "match:Let" (vcat [ppr bind, ppr $ okToFloat (rv_lcl renv) (bindFreeVars bind)]) $
+ okToFloat (rv_lcl renv) (bindFreeVars bind) -- See Note [Matching lets]
= match (renv { rv_fltR = flt_subst' })
(subst { rs_binds = rs_binds subst . Let bind'
, rs_bndrs = extendVarSetList (rs_bndrs subst) new_bndrs })
@@ -671,30 +687,11 @@ match renv subst (App f1 a1) (App f2 a2)
= do { subst' <- match renv subst f1 f2
; match renv subst' a1 a2 }
-match renv subst (Lam x1 e1) (Lam x2 e2)
- = match renv' subst e1 e2
- where
- renv' = renv { rv_lcl = rnBndr2 (rv_lcl renv) x1 x2
- , rv_fltR = delBndr (rv_fltR renv) x2 }
-
--- This rule does eta expansion
--- (\x.M) ~ N iff M ~ N x
--- It's important that this is *after* the let rule,
--- so that (\x.M) ~ (let y = e in \y.N)
--- does the let thing, and then gets the lam/lam rule above
match renv subst (Lam x1 e1) e2
- = match renv' subst e1 (App e2 (varToCoreExpr new_x))
- where
- (rn_env', new_x) = rnEtaL (rv_lcl renv) x1
- renv' = renv { rv_lcl = rn_env' }
-
--- Eta expansion the other way
--- M ~ (\y.N) iff M y ~ N
-match renv subst e1 (Lam x2 e2)
- = match renv' subst (App e1 (varToCoreExpr new_x)) e2
- where
- (rn_env', new_x) = rnEtaR (rv_lcl renv) x2
- renv' = renv { rv_lcl = rn_env' }
+ | Just (x2, e2) <- exprIsLambda_maybe (rvInScopeEnv renv) e2
+ = let renv' = renv { rv_lcl = rnBndr2 (rv_lcl renv) x1 x2
+ , rv_fltR = delBndr (rv_fltR renv) x2 }
+ in match renv' subst e1 e2
match renv subst (Case e1 x1 ty1 alts1) (Case e2 x2 ty2 alts2)
= do { subst1 <- match_ty renv subst ty1 ty2
@@ -729,9 +726,28 @@ match_co renv subst (Refl r1 ty1) co
Refl r2 ty2
| r1 == r2 -> match_ty renv subst ty1 ty2
_ -> Nothing
-match_co _ _ co1 _
- = pprTrace "match_co: needs more cases" (ppr co1) Nothing
- -- Currently just deals with CoVarCo and Refl
+match_co renv subst (TyConAppCo r1 tc1 cos1) co2
+ = case co2 of
+ TyConAppCo r2 tc2 cos2
+ | r1 == r2 && tc1 == tc2
+ -> match_cos renv subst cos1 cos2
+ _ -> Nothing
+match_co _ _ co1 co2
+ = pprTrace "match_co: needs more cases" (ppr co1 $$ ppr co2) Nothing
+ -- Currently just deals with CoVarCo, TyConAppCo and Refl
+
+match_cos :: RuleMatchEnv
+ -> RuleSubst
+ -> [Coercion]
+ -> [Coercion]
+ -> Maybe RuleSubst
+match_cos renv subst (co1:cos1) (co2:cos2) =
+ case match_co renv subst co1 co2 of
+ Just subst' -> match_cos renv subst' cos1 cos2
+ Nothing -> Nothing
+match_cos _ subst [] [] = Just subst
+match_cos _ _ cos1 cos2 = pprTrace "match_cos: not same length" (ppr cos1 $$ ppr cos2) Nothing
+
-------------
rnMatchBndr2 :: RuleMatchEnv -> RuleSubst -> Var -> Var -> RuleMatchEnv
@@ -887,10 +903,17 @@ Hence, (a) the guard (not (isLocallyBoundR v2))
Note [Tick annotations in RULE matching]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-We used to look through Notes in both template and expression being
-matched. This would be incorrect for ticks, which we cannot discard,
-so we do not look through Ticks at all. cf Note [Notes in call
-patterns] in SpecConstr
+
+We used to unconditionally look through Notes in both template and
+expression being matched. This is actually illegal for counting or
+cost-centre-scoped ticks, because we have no place to put them without
+changing entry counts and/or costs. So now we just fail the match in
+these cases.
+
+On the other hand, where we are allowed to insert new cost into the
+tick scope, we can float them upwards to the rule application site.
+
+cf Note [Notes in call patterns] in SpecConstr
Note [Matching lets]
~~~~~~~~~~~~~~~~~~~~
@@ -998,6 +1021,7 @@ at all.
That is why the 'lookupRnInScope' call in the (Var v2) case of 'match'
is so important.
+
%************************************************************************
%* *
Rule-check the program
diff --git a/compiler/specialise/SpecConstr.lhs b/compiler/specialise/SpecConstr.lhs
index 060c705cda4f..be68b131c557 100644
--- a/compiler/specialise/SpecConstr.lhs
+++ b/compiler/specialise/SpecConstr.lhs
@@ -9,6 +9,8 @@ ToDo [Oct 2013]
\section[SpecConstr]{Specialise over constructors}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module SpecConstr(
specConstrProgram
#ifdef GHCI
@@ -335,7 +337,7 @@ I wonder if SpecConstr couldn't be extended to handle this? After all,
lambda is a sort of constructor for functions and perhaps it already
has most of the necessary machinery?
-Furthermore, there's an immediate win, because you don't need to allocate the lamda
+Furthermore, there's an immediate win, because you don't need to allocate the lambda
at the call site; and if perchance it's called in the recursive call, then you
may avoid allocating it altogether. Just like for constructors.
@@ -396,16 +398,19 @@ use the calls in the un-specialised RHS as seeds. We call these
Note [Top-level recursive groups]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If all the bindings in a top-level recursive group are not exported,
-all the calls are in the rest of the top-level bindings.
-This means we can specialise with those call patterns instead of with the RHSs
-of the recursive group.
+If all the bindings in a top-level recursive group are local (not
+exported), then all the calls are in the rest of the top-level
+bindings. This means we can specialise with those call patterns
+instead of with the RHSs of the recursive group.
+
+(Question: maybe we should *also* use calls in the rest of the
+top-level bindings as seeds?)
-To get the call usage information, we work backwards through the top-level bindings
-so we see the usage before we get to the binding of the function.
-Before we can collect the usage though, we go through all the bindings and add them
-to the environment. This is necessary because usage is only tracked for functions
-in the environment.
+To get the call usage information, we work backwards through the
+top-level bindings so we see the usage before we get to the binding of
+the function. Before we can collect the usage though, we go through
+all the bindings and add them to the environment. This is necessary
+because usage is only tracked for functions in the environment.
The actual seeding of the specialisation is very similar to Note [Local recursive group].
@@ -880,7 +885,8 @@ extendCaseBndrs env scrut case_bndr con alt_bndrs
= (env2, alt_bndrs')
where
live_case_bndr = not (isDeadBinder case_bndr)
- env1 | Var v <- scrut = extendValEnv env v cval
+ env1 | Var v <- snd $ stripTicksTop (const True) scrut
+ = extendValEnv env v cval
| otherwise = env -- See Note [Add scrutinee to ValueEnv too]
env2 | live_case_bndr = extendValEnv env1 case_bndr cval
| otherwise = env1
@@ -1323,16 +1329,14 @@ scTopBind env usage (Rec prs)
= do { (rhs_usgs, rhss') <- mapAndUnzipM (scExpr env) rhss
; return (usage `combineUsage` (combineUsages rhs_usgs), Rec (bndrs `zip` rhss')) }
| otherwise -- Do specialisation
- = do { (rhs_usgs, rhs_infos) <- mapAndUnzipM (scRecRhs env) (bndrs `zip` rhss)
+ = do { (rhs_usgs, rhs_infos) <- mapAndUnzipM (scRecRhs env) prs
-- ; pprTrace "scTopBind" (ppr bndrs $$ ppr (map (lookupVarEnv (scu_calls usage)) bndrs)) (return ())
-- Note [Top-level recursive groups]
- ; let (usg,rest) = if all (not . isExportedId) bndrs
- then -- pprTrace "scTopBind-T" (ppr bndrs $$ ppr (map (fmap (map snd) . lookupVarEnv (scu_calls usage)) bndrs))
- ( usage
- , [SI [] 0 (Just us) | us <- rhs_usgs] )
- else ( combineUsages rhs_usgs
- , [SI [] 0 Nothing | _ <- rhs_usgs] )
+ ; let (usg,rest) | any isExportedId bndrs -- Seed from RHSs
+ = ( combineUsages rhs_usgs, [SI [] 0 Nothing | _ <- rhs_usgs] )
+ | otherwise -- Seed from body only
+ = ( usage, [SI [] 0 (Just us) | us <- rhs_usgs] )
; (usage', specs) <- specLoop (scForce env force_spec)
(scu_calls usg) rhs_infos nullUsage rest
@@ -1446,11 +1450,6 @@ specialise env bind_calls (RI fn _ arg_bndrs body arg_occs)
, notNull arg_bndrs -- Only specialise functions
, Just all_calls <- lookupVarEnv bind_calls fn
= do { (boring_call, pats) <- callsToPats env specs arg_occs all_calls
--- ; pprTrace "specialise" (vcat [ ppr fn <+> text "with" <+> int (length pats) <+> text "good patterns"
--- , text "arg_occs" <+> ppr arg_occs
--- , text "calls" <+> ppr all_calls
--- , text "good pats" <+> ppr pats]) $
--- return ()
-- Bale out if too many specialisations
; let n_pats = length pats
@@ -1473,12 +1472,25 @@ specialise env bind_calls (RI fn _ arg_bndrs body arg_occs)
_normal_case -> do {
- let spec_env = decreaseSpecCount env n_pats
+-- ; if (not (null pats) || isJust mb_unspec) then
+-- pprTrace "specialise" (vcat [ ppr fn <+> text "with" <+> int (length pats) <+> text "good patterns"
+-- , text "mb_unspec" <+> ppr (isJust mb_unspec)
+-- , text "arg_occs" <+> ppr arg_occs
+-- , text "good pats" <+> ppr pats]) $
+-- return ()
+-- else return ()
+
+ ; let spec_env = decreaseSpecCount env n_pats
; (spec_usgs, new_specs) <- mapAndUnzipM (spec_one spec_env fn arg_bndrs body)
(pats `zip` [spec_count..])
-- See Note [Specialise original body]
; let spec_usg = combineUsages spec_usgs
+
+ -- If there were any boring calls among the seeds (= all_calls), then those
+ -- calls will call the un-specialised function. So we should use the seeds
+ -- from the _unspecialised_ function's RHS, which are in mb_unspec, by returning
+ -- them in new_usg.
(new_usg, mb_unspec')
= case mb_unspec of
Just rhs_usg | boring_call -> (spec_usg `combineUsage` rhs_usg, Nothing)
@@ -1906,6 +1918,10 @@ isValue env (Lam b e)
Nothing -> Nothing
| otherwise = Just LambdaVal
+isValue env (Tick t e)
+ | not (tickishIsCode t)
+ = isValue env e
+
isValue _env expr -- Maybe it's a constructor application
| (Var fun, args) <- collectArgs expr
= case isDataConWorkId_maybe fun of
diff --git a/compiler/specialise/Specialise.lhs b/compiler/specialise/Specialise.lhs
index 3191ae946e79..baa5d1971fe0 100644
--- a/compiler/specialise/Specialise.lhs
+++ b/compiler/specialise/Specialise.lhs
@@ -4,6 +4,8 @@
\section[Specialise]{Stamping out overloading, and (optionally) polymorphism}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module Specialise ( specProgram ) where
#include "HsVersions.h"
diff --git a/compiler/stgSyn/CoreToStg.lhs b/compiler/stgSyn/CoreToStg.lhs
index 2c4df6955f9a..ef0599bf2dab 100644
--- a/compiler/stgSyn/CoreToStg.lhs
+++ b/compiler/stgSyn/CoreToStg.lhs
@@ -1,4 +1,6 @@
\begin{code}
+{-# LANGUAGE BangPatterns, CPP #-}
+
--
-- (c) The GRASP/AQUA Project, Glasgow University, 1993-1998
--
@@ -28,7 +30,6 @@ import DataCon
import CostCentre ( noCCS )
import VarSet
import VarEnv
-import Maybes ( maybeToBool )
import Module
import Name ( getOccName, isExternalName, nameOccName )
import OccName ( occNameString, occNameFS )
@@ -44,6 +45,7 @@ import ForeignCall
import Demand ( isSingleUsed )
import PrimOp ( PrimCall(..) )
+import Data.Maybe (isJust)
import Control.Monad (liftM, ap)
-- Note [Live vs free]
@@ -316,28 +318,9 @@ mkTopStgRhs :: DynFlags -> Module -> FreeVarsInfo
-> SRT -> Id -> StgBinderInfo -> StgExpr
-> StgRhs
-mkTopStgRhs _ _ rhs_fvs srt _ binder_info (StgLam bndrs body)
- = StgRhsClosure noCCS binder_info
- (getFVs rhs_fvs)
- ReEntrant
- srt
- bndrs body
-
-mkTopStgRhs dflags this_mod _ _ _ _ (StgConApp con args)
- | not (isDllConApp dflags this_mod con args) -- Dynamic StgConApps are updatable
- = StgRhsCon noCCS con args
-
-mkTopStgRhs _ _ rhs_fvs srt bndr binder_info rhs
- = StgRhsClosure noCCS binder_info
- (getFVs rhs_fvs)
- (getUpdateFlag bndr)
- srt
- [] rhs
-
-getUpdateFlag :: Id -> UpdateFlag
-getUpdateFlag bndr
- = if isSingleUsed (idDemandInfo bndr)
- then SingleEntry else Updatable
+mkTopStgRhs dflags this_mod = mkStgRhs' con_updateable
+ -- Dynamic StgConApps are updatable
+ where con_updateable con args = isDllConApp dflags this_mod con args
-- ---------------------------------------------------------------------------
-- Expressions
@@ -363,13 +346,13 @@ coreToStgExpr
-- should have converted them all to a real core representation.
coreToStgExpr (Lit (LitInteger {})) = panic "coreToStgExpr: LitInteger"
coreToStgExpr (Lit l) = return (StgLit l, emptyFVInfo, emptyVarSet)
-coreToStgExpr (Var v) = coreToStgApp Nothing v []
-coreToStgExpr (Coercion _) = coreToStgApp Nothing coercionTokenId []
+coreToStgExpr (Var v) = coreToStgApp Nothing v [] []
+coreToStgExpr (Coercion _) = coreToStgApp Nothing coercionTokenId [] []
coreToStgExpr expr@(App _ _)
- = coreToStgApp Nothing f args
+ = coreToStgApp Nothing f args ticks
where
- (f, args) = myCollectArgs expr
+ (f, args, ticks) = myCollectArgs expr
coreToStgExpr expr@(Lam _ _)
= let
@@ -386,16 +369,15 @@ coreToStgExpr expr@(Lam _ _)
return (result_expr, fvs, escs)
-coreToStgExpr (Tick (HpcTick m n) expr)
- = do (expr2, fvs, escs) <- coreToStgExpr expr
- return (StgTick m n expr2, fvs, escs)
-
-coreToStgExpr (Tick (ProfNote cc tick push) expr)
- = do (expr2, fvs, escs) <- coreToStgExpr expr
- return (StgSCC cc tick push expr2, fvs, escs)
-
-coreToStgExpr (Tick Breakpoint{} _expr)
- = panic "coreToStgExpr: breakpoint should not happen"
+coreToStgExpr (Tick tick expr)
+ = do !_ <- case tick of
+ HpcTick{} -> return ()
+ ProfNote{} -> return ()
+ SourceNote{} -> return ()
+ CoreNote{} -> return ()
+ Breakpoint{} -> panic "coreToStgExpr: breakpoint should not happen"
+ (expr2, fvs, escs) <- coreToStgExpr expr
+ return (StgTick tick expr2, fvs, escs)
coreToStgExpr (Cast expr _)
= coreToStgExpr expr
@@ -540,11 +522,12 @@ coreToStgApp
-- with specified update flag
-> Id -- Function
-> [CoreArg] -- Arguments
+ -> [Tickish Id] -- Debug ticks
-> LneM (StgExpr, FreeVarsInfo, EscVarsSet)
-coreToStgApp _ f args = do
- (args', args_fvs) <- coreToStgArgs args
+coreToStgApp _ f args ticks = do
+ (args', args_fvs, ticks') <- coreToStgArgs args
how_bound <- lookupVarLne f
let
@@ -606,17 +589,18 @@ coreToStgApp _ f args = do
FCallId call -> ASSERT( saturated )
StgOpApp (StgFCallOp call (idUnique f)) args' res_ty
- TickBoxOpId {} -> pprPanic "coreToStg TickBox" $ ppr (f,args')
_other -> StgApp f args'
fvs = fun_fvs `unionFVInfo` args_fvs
vars = fun_escs `unionVarSet` (getFVSet args_fvs)
-- All the free vars of the args are disqualified
-- from being let-no-escaped.
+ tapp = foldr StgTick app (ticks ++ ticks')
+
-- Forcing these fixes a leak in the code generator, noticed while
-- profiling for trac #4367
app `seq` fvs `seq` seqVarSet vars `seq` return (
- app,
+ tapp,
fvs,
vars
)
@@ -628,24 +612,31 @@ coreToStgApp _ f args = do
-- This is the guy that turns applications into A-normal form
-- ---------------------------------------------------------------------------
-coreToStgArgs :: [CoreArg] -> LneM ([StgArg], FreeVarsInfo)
+coreToStgArgs :: [CoreArg] -> LneM ([StgArg], FreeVarsInfo, [Tickish Id])
coreToStgArgs []
- = return ([], emptyFVInfo)
+ = return ([], emptyFVInfo, [])
coreToStgArgs (Type _ : args) = do -- Type argument
- (args', fvs) <- coreToStgArgs args
- return (args', fvs)
+ (args', fvs, ts) <- coreToStgArgs args
+ return (args', fvs, ts)
coreToStgArgs (Coercion _ : args) -- Coercion argument; replace with place holder
- = do { (args', fvs) <- coreToStgArgs args
- ; return (StgVarArg coercionTokenId : args', fvs) }
+ = do { (args', fvs, ts) <- coreToStgArgs args
+ ; return (StgVarArg coercionTokenId : args', fvs, ts) }
+
+coreToStgArgs (Tick t e : args)
+ = ASSERT( not (tickishIsCode t) )
+ do { (args', fvs, ts) <- coreToStgArgs (e : args)
+ ; return (args', fvs, t:ts) }
coreToStgArgs (arg : args) = do -- Non-type argument
- (stg_args, args_fvs) <- coreToStgArgs args
+ (stg_args, args_fvs, ticks) <- coreToStgArgs args
(arg', arg_fvs, _escs) <- coreToStgExpr arg
let
fvs = args_fvs `unionFVInfo` arg_fvs
- stg_arg = case arg' of
+
+ (aticks, arg'') = stripStgTicksTop tickishFloatable arg'
+ stg_arg = case arg'' of
StgApp v [] -> StgVarArg v
StgConApp con [] -> StgVarArg (dataConWorkId con)
StgLit lit -> StgLitArg lit
@@ -673,7 +664,7 @@ coreToStgArgs (arg : args) = do -- Non-type argument
-- We also want to check if a pointer is cast to a non-ptr etc
WARN( bad_args, ptext (sLit "Dangerous-looking argument. Probable cause: bad unsafeCoerce#") $$ ppr arg )
- return (stg_arg : stg_args, fvs)
+ return (stg_arg : stg_args, fvs, ticks ++ aticks)
-- ---------------------------------------------------------------------------
@@ -820,21 +811,31 @@ coreToStgRhs scope_fv_info binders (bndr, rhs) = do
bndr_info = lookupFVInfo scope_fv_info bndr
mkStgRhs :: FreeVarsInfo -> SRT -> Id -> StgBinderInfo -> StgExpr -> StgRhs
+mkStgRhs = mkStgRhs' con_updateable
+ where con_updateable _ _ = False
-mkStgRhs _ _ _ _ (StgConApp con args) = StgRhsCon noCCS con args
-
-mkStgRhs rhs_fvs srt _ binder_info (StgLam bndrs body)
+mkStgRhs' :: (DataCon -> [StgArg] -> Bool)
+ -> FreeVarsInfo -> SRT -> Id -> StgBinderInfo -> StgExpr -> StgRhs
+mkStgRhs' con_updateable rhs_fvs srt bndr binder_info rhs
+ | StgLam bndrs body <- rhs
= StgRhsClosure noCCS binder_info
- (getFVs rhs_fvs)
- ReEntrant
- srt bndrs body
-
-mkStgRhs rhs_fvs srt bndr binder_info rhs
+ (getFVs rhs_fvs)
+ ReEntrant
+ srt bndrs body
+ | StgConApp con args <- unticked_rhs
+ , not (con_updateable con args)
+ = StgRhsCon noCCS con args
+ | otherwise
= StgRhsClosure noCCS binder_info
- (getFVs rhs_fvs)
- upd_flag srt [] rhs
- where
- upd_flag = getUpdateFlag bndr
+ (getFVs rhs_fvs)
+ upd_flag srt [] rhs
+ where
+
+ (_, unticked_rhs) = stripStgTicksTop (not . tickishIsCode) rhs
+
+ upd_flag | isSingleUsed (idDemandInfo bndr) = SingleEntry
+ | otherwise = Updatable
+
{-
SDM: disabled. Eval/Apply can't handle functions with arity zero very
well; and making these into simple non-updatable thunks breaks other
@@ -1106,7 +1107,7 @@ minusFVBinder v fv = fv `delVarEnv` v
-- c.f. CoreFVs.delBinderFV
elementOfFVInfo :: Id -> FreeVarsInfo -> Bool
-elementOfFVInfo id fvs = maybeToBool (lookupVarEnv fvs id)
+elementOfFVInfo id fvs = isJust (lookupVarEnv fvs id)
lookupFVInfo :: FreeVarsInfo -> Id -> StgBinderInfo
-- Find how the given Id is used.
@@ -1159,26 +1160,23 @@ myCollectBinders expr
= go [] expr
where
go bs (Lam b e) = go (b:bs) e
- go bs e@(Tick t e')
- | tickishIsCode t = (reverse bs, e)
- | otherwise = go bs e'
- -- Ignore only non-code source annotations
go bs (Cast e _) = go bs e
go bs e = (reverse bs, e)
-myCollectArgs :: CoreExpr -> (Id, [CoreArg])
+myCollectArgs :: CoreExpr -> (Id, [CoreArg], [Tickish Id])
-- We assume that we only have variables
-- in the function position by now
myCollectArgs expr
- = go expr []
+ = go expr [] []
where
- go (Var v) as = (v, as)
- go (App f a) as = go f (a:as)
- go (Tick _ _) _ = pprPanic "CoreToStg.myCollectArgs" (ppr expr)
- go (Cast e _) as = go e as
- go (Lam b e) as
- | isTyVar b = go e as -- Note [Collect args]
- go _ _ = pprPanic "CoreToStg.myCollectArgs" (ppr expr)
+ go (Var v) as ts = (v, as, ts)
+ go (App f a) as ts = go f (a:as) ts
+ go (Tick t e) as ts = ASSERT( all isTypeArg as )
+ go e as (t:ts) -- ticks can appear inside type apps
+ go (Cast e _) as ts = go e as ts
+ go (Lam b e) as ts
+ | isTyVar b = go e as ts -- Note [Collect args]
+ go _ _ _ = pprPanic "CoreToStg.myCollectArgs" (ppr expr)
-- Note [Collect args]
-- ~~~~~~~~~~~~~~~~~~~
@@ -1190,4 +1188,5 @@ stgArity :: Id -> HowBound -> Arity
stgArity _ (LetBound _ arity) = arity
stgArity f ImportBound = idArity f
stgArity _ LambdaBound = 0
+
\end{code}
diff --git a/compiler/stgSyn/StgLint.lhs b/compiler/stgSyn/StgLint.lhs
index 04349db3dfdf..6adde635eae5 100644
--- a/compiler/stgSyn/StgLint.lhs
+++ b/compiler/stgSyn/StgLint.lhs
@@ -4,6 +4,8 @@
\section[StgLint]{A ``lint'' pass to check for Stg correctness}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module StgLint ( lintStgBindings ) where
import StgSyn
@@ -192,7 +194,7 @@ lintStgExpr (StgLetNoEscape _ _ binds body) = do
addInScopeVars binders $
lintStgExpr body
-lintStgExpr (StgSCC _ _ _ expr) = lintStgExpr expr
+lintStgExpr (StgTick _ expr) = lintStgExpr expr
lintStgExpr (StgCase scrut _ _ bndr _ alts_type alts) = runMaybeT $ do
_ <- MaybeT $ lintStgExpr scrut
@@ -215,8 +217,6 @@ lintStgExpr (StgCase scrut _ _ bndr _ alts_type alts) = runMaybeT $ do
where
bad_bndr = mkDefltMsg bndr tc
-lintStgExpr e = pprPanic "lintStgExpr" (ppr e)
-
lintStgAlts :: [StgAlt]
-> Type -- Type of scrutinee
-> LintM (Maybe Type) -- Just ty => type is accurage
diff --git a/compiler/stgSyn/StgSyn.lhs b/compiler/stgSyn/StgSyn.lhs
index 3fa8c68c1618..64840749da16 100644
--- a/compiler/stgSyn/StgSyn.lhs
+++ b/compiler/stgSyn/StgSyn.lhs
@@ -9,6 +9,7 @@ being one that happens to be ideally suited to spineless tagless code
generation.
\begin{code}
+{-# LANGUAGE CPP #-}
module StgSyn (
GenStgArg(..),
@@ -37,6 +38,7 @@ module StgSyn (
stgBindHasCafRefs, stgArgHasCafRefs, stgRhsArity,
isDllConApp,
stgArgType,
+ stripStgTicksTop,
pprStgBinding, pprStgBindings,
pprStgLVs
@@ -45,8 +47,8 @@ module StgSyn (
#include "HsVersions.h"
import Bitmap
-import CoreSyn ( AltCon )
-import CostCentre ( CostCentreStack, CostCentre )
+import CoreSyn ( AltCon, Tickish )
+import CostCentre ( CostCentreStack )
import DataCon
import DynFlags
import FastString
@@ -54,7 +56,7 @@ import ForeignCall ( ForeignCall )
import Id
import IdInfo ( mayHaveCafRefs )
import Literal ( Literal, literalType )
-import Module
+import Module ( Module )
import Outputable
import Packages ( isDllName )
import Platform
@@ -141,6 +143,14 @@ isAddrRep _ = False
stgArgType :: StgArg -> Type
stgArgType (StgVarArg v) = idType v
stgArgType (StgLitArg lit) = literalType lit
+
+
+-- | Strip ticks of a given type from an STG expression
+stripStgTicksTop :: (Tickish Id -> Bool) -> StgExpr -> ([Tickish Id], StgExpr)
+stripStgTicksTop p = go []
+ where go ts (StgTick t e) | p t = go (t:ts) e
+ go ts other = (reverse ts, other)
+
\end{code}
%************************************************************************
@@ -360,34 +370,17 @@ And so the code for let(rec)-things:
\end{code}
%************************************************************************
-%* *
-\subsubsection{@GenStgExpr@: @scc@ expressions}
-%* *
-%************************************************************************
-
-For @scc@ expressions we introduce a new STG construct.
-
-\begin{code}
- | StgSCC
- CostCentre -- label of SCC expression
- !Bool -- bump the entry count?
- !Bool -- push the cost centre?
- (GenStgExpr bndr occ) -- scc expression
-\end{code}
-
-%************************************************************************
-%* *
-\subsubsection{@GenStgExpr@: @hpc@ expressions}
-%* *
+%* *
+\subsubsection{@GenStgExpr@: @hpc@, @scc@ and other debug annotations}
+%* *
%************************************************************************
Finally for @hpc@ expressions we introduce a new STG construct.
\begin{code}
| StgTick
- Module -- the module of the source of this tick
- Int -- tick number
- (GenStgExpr bndr occ) -- sub expression
+ (Tickish bndr)
+ (GenStgExpr bndr occ) -- sub expression
-- END of GenStgExpr
\end{code}
@@ -737,16 +730,12 @@ pprStgExpr (StgLetNoEscape lvs_whole lvs_rhss bind expr)
char ']'])))
2 (ppr expr)]
-pprStgExpr (StgSCC cc tick push expr)
- = sep [ hsep [scc, ppr cc], pprStgExpr expr ]
- where
- scc | tick && push = ptext (sLit "_scc_")
- | tick = ptext (sLit "_tick_")
- | otherwise = ptext (sLit "_push_")
+pprStgExpr (StgTick tickish expr)
+ = sdocWithDynFlags $ \dflags ->
+ if gopt Opt_PprShowTicks dflags
+ then sep [ ppr tickish, pprStgExpr expr ]
+ else pprStgExpr expr
-pprStgExpr (StgTick m n expr)
- = sep [ hsep [ptext (sLit "_tick_"), pprModule m,text (show n)],
- pprStgExpr expr ]
pprStgExpr (StgCase expr lvs_whole lvs_rhss bndr srt alt_type alts)
= sep [sep [ptext (sLit "case"),
diff --git a/compiler/stranal/DmdAnal.lhs b/compiler/stranal/DmdAnal.lhs
index e9a7ab488f71..a3b7c0b72ad2 100644
--- a/compiler/stranal/DmdAnal.lhs
+++ b/compiler/stranal/DmdAnal.lhs
@@ -7,15 +7,15 @@
-----------------
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
module DmdAnal ( dmdAnalProgram ) where
#include "HsVersions.h"
-import Var ( isTyVar )
import DynFlags
-import WwLib ( deepSplitProductType_maybe )
+import WwLib ( findTypeShape, deepSplitProductType_maybe )
import Demand -- All of it
import CoreSyn
import Outputable
@@ -26,11 +26,8 @@ import Data.List
import DataCon
import Id
import CoreUtils ( exprIsHNF, exprType, exprIsTrivial )
--- import PprCore
import TyCon
-import Type ( eqType )
--- import Pair
--- import Coercion ( coercionKind )
+import Type
import FamInstEnv
import Util
import Maybes ( isJust )
@@ -118,26 +115,29 @@ dmdAnalStar :: AnalEnv
-> Demand -- This one takes a *Demand*
-> CoreExpr -> (BothDmdArg, CoreExpr)
dmdAnalStar env dmd e
- | (cd, defer_and_use) <- toCleanDmd dmd
+ | (cd, defer_and_use) <- toCleanDmd dmd (exprType e)
, (dmd_ty, e') <- dmdAnal env cd e
= (postProcessDmdTypeM defer_and_use dmd_ty, e')
-- Main Demand Analsysis machinery
-dmdAnal :: AnalEnv
+dmdAnal, dmdAnal' :: AnalEnv
-> CleanDemand -- The main one takes a *CleanDemand*
-> CoreExpr -> (DmdType, CoreExpr)
-- The CleanDemand is always strict and not absent
-- See Note [Ensure demand is strict]
-dmdAnal _ _ (Lit lit) = (nopDmdType, Lit lit)
-dmdAnal _ _ (Type ty) = (nopDmdType, Type ty) -- Doesn't happen, in fact
-dmdAnal _ _ (Coercion co) = (nopDmdType, Coercion co)
+dmdAnal env d e = -- pprTrace "dmdAnal" (ppr d <+> ppr e) $
+ dmdAnal' env d e
-dmdAnal env dmd (Var var)
+dmdAnal' _ _ (Lit lit) = (nopDmdType, Lit lit)
+dmdAnal' _ _ (Type ty) = (nopDmdType, Type ty) -- Doesn't happen, in fact
+dmdAnal' _ _ (Coercion co) = (nopDmdType, Coercion co)
+
+dmdAnal' env dmd (Var var)
= (dmdTransform env var dmd, Var var)
-dmdAnal env dmd (Cast e co)
+dmdAnal' env dmd (Cast e co)
= (dmd_ty, Cast e' co)
where
(dmd_ty, e') = dmdAnal env dmd e
@@ -155,24 +155,24 @@ dmdAnal env dmd (Cast e co)
-- a fixpoint. So revert to a vanilla Eval demand
-}
-dmdAnal env dmd (Tick t e)
+dmdAnal' env dmd (Tick t e)
= (dmd_ty, Tick t e')
where
(dmd_ty, e') = dmdAnal env dmd e
-dmdAnal env dmd (App fun (Type ty))
+dmdAnal' env dmd (App fun (Type ty))
= (fun_ty, App fun' (Type ty))
where
(fun_ty, fun') = dmdAnal env dmd fun
-dmdAnal sigs dmd (App fun (Coercion co))
+dmdAnal' sigs dmd (App fun (Coercion co))
= (fun_ty, App fun' (Coercion co))
where
(fun_ty, fun') = dmdAnal sigs dmd fun
-- Lots of the other code is there to make this
-- beautiful, compositional, application rule :-)
-dmdAnal env dmd (App fun arg) -- Non-type arguments
+dmdAnal' env dmd (App fun arg) -- Non-type arguments
= let -- [Type arg handled above]
call_dmd = mkCallDmd dmd
(fun_ty, fun') = dmdAnal env call_dmd fun
@@ -190,7 +190,7 @@ dmdAnal env dmd (App fun arg) -- Non-type arguments
(res_ty `bothDmdType` arg_ty, App fun' arg')
-- this is an anonymous lambda, since @dmdAnalRhs@ uses @collectBinders@
-dmdAnal env dmd (Lam var body)
+dmdAnal' env dmd (Lam var body)
| isTyVar var
= let
(body_ty, body') = dmdAnal env dmd body
@@ -209,7 +209,7 @@ dmdAnal env dmd (Lam var body)
in
(postProcessUnsat defer_and_use lam_ty, Lam var' body')
-dmdAnal env dmd (Case scrut case_bndr ty [alt@(DataAlt dc, _, _)])
+dmdAnal' env dmd (Case scrut case_bndr ty [alt@(DataAlt dc, _, _)])
-- Only one alternative with a product constructor
| let tycon = dataConTyCon dc
, isProductTyCon tycon
@@ -267,7 +267,7 @@ dmdAnal env dmd (Case scrut case_bndr ty [alt@(DataAlt dc, _, _)])
-- , text "res_ty" <+> ppr res_ty ]) $
(res_ty, Case scrut' case_bndr' ty [alt'])
-dmdAnal env dmd (Case scrut case_bndr ty alts)
+dmdAnal' env dmd (Case scrut case_bndr ty alts)
= let -- Case expression with multiple alternatives
(alt_tys, alts') = mapAndUnzip (dmdAnalAlt env dmd) alts
(scrut_ty, scrut') = dmdAnal env cleanEvalDmd scrut
@@ -281,7 +281,7 @@ dmdAnal env dmd (Case scrut case_bndr ty alts)
-- , text "res_ty" <+> ppr res_ty ]) $
(res_ty, Case scrut' case_bndr' ty alts')
-dmdAnal env dmd (Let (NonRec id rhs) body)
+dmdAnal' env dmd (Let (NonRec id rhs) body)
= (body_ty2, Let (NonRec id2 annotated_rhs) body')
where
(sig, lazy_fv, id1, rhs') = dmdAnalRhs NotTopLevel Nothing env id rhs
@@ -306,7 +306,7 @@ dmdAnal env dmd (Let (NonRec id rhs) body)
-- the vanilla call demand seem to be due to (b). So we don't
-- bother to re-analyse the RHS.
-dmdAnal env dmd (Let (Rec pairs) body)
+dmdAnal' env dmd (Let (Rec pairs) body)
= let
(env', lazy_fv, pairs') = dmdFix NotTopLevel env pairs
(body_ty, body') = dmdAnal env' dmd body
@@ -489,8 +489,7 @@ dmdTransform :: AnalEnv -- The strictness environment
dmdTransform env var dmd
| isDataConWorkId var -- Data constructor
- = dmdTransformDataConSig
- (idArity var) (idStrictness var) dmd
+ = dmdTransformDataConSig (idArity var) (idStrictness var) dmd
| gopt Opt_DmdTxDictSel (ae_dflags env),
Just _ <- isClassOpId_maybe var -- Dictionary component selector
@@ -595,9 +594,18 @@ dmdAnalRhs :: TopLevelFlag
-- Process the RHS of the binding, add the strictness signature
-- to the Id, and augment the environment with the signature as well.
dmdAnalRhs top_lvl rec_flag env id rhs
- | Just fn <- unpackTrivial rhs -- See Note [Trivial right-hand sides]
+ | Just fn <- unpackTrivial rhs -- See Note [Demand analysis for trivial right-hand sides]
, let fn_str = getStrictness env fn
- = (fn_str, emptyDmdEnv, set_idStrictness env id fn_str, rhs)
+ fn_fv | isLocalId fn = unitVarEnv fn topDmd
+ | otherwise = emptyDmdEnv
+ -- Note [Remember to demand the function itself]
+ -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ -- fn_fv: don't forget to produce a demand for fn itself
+ -- Lacking this caused Trac #9128
+ -- The demand is very conservative (topDmd), but that doesn't
+ -- matter; trivial bindings are usually inlined, so it only
+ -- kicks in for top-level bindings and NOINLINE bindings
+ = (fn_str, fn_fv, set_idStrictness env id fn_str, rhs)
| otherwise
= (sig_ty, lazy_fv, id', mkLams bndrs' body')
@@ -640,7 +648,7 @@ dmdAnalRhs top_lvl rec_flag env id rhs
unpackTrivial :: CoreExpr -> Maybe Id
-- Returns (Just v) if the arg is really equal to v, modulo
-- casts, type applications etc
--- See Note [Trivial right-hand sides]
+-- See Note [Demand analysis for trivial right-hand sides]
unpackTrivial (Var v) = Just v
unpackTrivial (Cast e _) = unpackTrivial e
unpackTrivial (Lam v e) | isTyVar v = unpackTrivial e
@@ -648,15 +656,23 @@ unpackTrivial (App e a) | isTypeArg a = unpackTrivial e
unpackTrivial _ = Nothing
\end{code}
-Note [Trivial right-hand sides]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Note [Demand analysis for trivial right-hand sides]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
foo = plusInt |> co
where plusInt is an arity-2 function with known strictness. Clearly
we want plusInt's strictness to propagate to foo! But because it has
-no manifest lambdas, it won't do so automatically. So we have a
+no manifest lambdas, it won't do so automatically, and indeed 'co' might
+have type (Int->Int->Int) ~ T, so we *can't* eta-expand. So we have a
special case for right-hand sides that are "trivial", namely variables,
-casts, type applications, and the like.
+casts, type applications, and the like.
+
+Note that this can mean that 'foo' has an arity that is smaller than that
+indicated by its demand info. e.g. if co :: (Int->Int->Int) ~ T, then
+foo's arity will be zero (see Note [exprArity invariant] in CoreArity),
+but its demand signature will be that of plusInt. A small example is the
+test case of Trac #8963.
+
Note [Product demands for function body]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -725,9 +741,8 @@ addLazyFVs dmd_ty lazy_fvs
-- call to f. So we just get an L demand for x for g.
\end{code}
-Note [do not strictify the argument dictionaries of a dfun]
+Note [Do not strictify the argument dictionaries of a dfun]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
The typechecker can tie recursive knots involving dfuns, so we do the
conservative thing and refrain from strictifying a dfun's argument
dictionaries.
@@ -739,17 +754,10 @@ annotateBndr :: AnalEnv -> DmdType -> Var -> (DmdType, Var)
-- according to the result demand of the provided demand type
-- No effect on the argument demands
annotateBndr env dmd_ty var
- | isTyVar var = (dmd_ty, var)
- | otherwise = (dmd_ty', set_idDemandInfo env var dmd')
+ | isId var = (dmd_ty', setIdDemandInfo var dmd)
+ | otherwise = (dmd_ty, var)
where
- (dmd_ty', dmd) = peelFV dmd_ty var
-
- dmd' | gopt Opt_DictsStrict (ae_dflags env)
- -- We never want to strictify a recursive let. At the moment
- -- annotateBndr is only call for non-recursive lets; if that
- -- changes, we need a RecFlag parameter and another guard here.
- = strictifyDictDmd (idType var) dmd
- | otherwise = dmd
+ (dmd_ty', dmd) = findBndrDmd env False dmd_ty var
annotateBndrs :: AnalEnv -> DmdType -> [Var] -> (DmdType, [Var])
annotateBndrs env = mapAccumR (annotateBndr env)
@@ -774,7 +782,7 @@ annotateLamIdBndr env arg_of_dfun dmd_ty one_shot id
-- Only called for Ids
= ASSERT( isId id )
-- pprTrace "annLamBndr" (vcat [ppr id, ppr _dmd_ty]) $
- (final_ty, setOneShotness one_shot (set_idDemandInfo env id dmd'))
+ (final_ty, setOneShotness one_shot (setIdDemandInfo id dmd))
where
-- Watch out! See note [Lambda-bound unfoldings]
final_ty = case maybeUnfoldingTemplate (idUnfolding id) of
@@ -784,13 +792,7 @@ annotateLamIdBndr env arg_of_dfun dmd_ty one_shot id
(unf_ty, _) = dmdAnalStar env dmd unf
main_ty = addDemand dmd dmd_ty'
- (dmd_ty', dmd) = peelFV dmd_ty id
-
- dmd' | gopt Opt_DictsStrict (ae_dflags env),
- -- see Note [do not strictify the argument dictionaries of a dfun]
- not arg_of_dfun
- = strictifyDictDmd (idType id) dmd
- | otherwise = dmd
+ (dmd_ty', dmd) = findBndrDmd env arg_of_dfun dmd_ty id
deleteFVs :: DmdType -> [Var] -> DmdType
deleteFVs (DmdType fvs dmds res) bndrs
@@ -1076,18 +1078,39 @@ extendSigsWithLam :: AnalEnv -> Id -> AnalEnv
-- Extend the AnalEnv when we meet a lambda binder
extendSigsWithLam env id
| isId id
- , isStrictDmd (idDemandInfo id) || ae_virgin env
+ , isStrictDmd (idDemandInfo id) || ae_virgin env
-- See Note [Optimistic CPR in the "virgin" case]
-- See Note [Initial CPR for strict binders]
, Just (dc,_,_,_) <- deepSplitProductType_maybe (ae_fam_envs env) $ idType id
= extendAnalEnv NotTopLevel env id (cprProdSig (dataConRepArity dc))
- | otherwise
+ | otherwise
= env
-set_idDemandInfo :: AnalEnv -> Id -> Demand -> Id
-set_idDemandInfo env id dmd
- = setIdDemandInfo id (zapDemand (ae_dflags env) dmd)
+findBndrDmd :: AnalEnv -> Bool -> DmdType -> Id -> (DmdType, Demand)
+-- See Note [Trimming a demand to a type] in Demand.lhs
+findBndrDmd env arg_of_dfun dmd_ty id
+ = (dmd_ty', dmd')
+ where
+ dmd' = zapDemand (ae_dflags env) $
+ strictify $
+ trimToType starting_dmd (findTypeShape fam_envs id_ty)
+
+ (dmd_ty', starting_dmd) = peelFV dmd_ty id
+
+ id_ty = idType id
+
+ strictify dmd
+ | gopt Opt_DictsStrict (ae_dflags env)
+ -- We never want to strictify a recursive let. At the moment
+ -- annotateBndr is only called for non-recursive lets; if that
+ -- changes, we need a RecFlag parameter and another guard here.
+ , not arg_of_dfun -- See Note [Do not strictify the argument dictionaries of a dfun]
+ = strictifyDictDmd id_ty dmd
+ | otherwise
+ = dmd
+
+ fam_envs = ae_fam_envs env
set_idStrictness :: AnalEnv -> Id -> StrictSig -> Id
set_idStrictness env id sig
diff --git a/compiler/stranal/WorkWrap.lhs b/compiler/stranal/WorkWrap.lhs
index f5bc18b69ebe..5b9d0a3083c0 100644
--- a/compiler/stranal/WorkWrap.lhs
+++ b/compiler/stranal/WorkWrap.lhs
@@ -4,7 +4,8 @@
\section[WorkWrap]{Worker/wrapper-generating back-end of strictness analyser}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -259,16 +260,28 @@ tryWW dflags fam_envs is_rec fn_id rhs
-- Furthermore, don't even expose strictness info
= return [ (fn_id, rhs) ]
+ | isStableUnfolding (realIdUnfolding fn_id)
+ = return [ (fn_id, rhs) ]
+ -- See Note [Don't w/w INLINE things]
+ -- and Note [Don't w/w INLINABLE things]
+ -- NB: use realIdUnfolding because we want to see the unfolding
+ -- even if it's a loop breaker!
+
+ | certainlyWillInline dflags (idUnfolding fn_id)
+ = let inline_rule = mkInlineUnfolding Nothing rhs
+ in return [ (fn_id `setIdUnfolding` inline_rule, rhs) ]
+ -- Note [Don't w/w inline small non-loop-breaker things]
+ -- NB: use idUnfolding because we don't want to apply
+ -- this criterion to a loop breaker!
+
+ | is_fun
+ = splitFun dflags fam_envs new_fn_id fn_info wrap_dmds res_info rhs
+
+ | is_thunk -- See Note [Thunk splitting]
+ = splitThunk dflags fam_envs is_rec new_fn_id rhs
+
| otherwise
- = do
- let doSplit | is_fun = splitFun dflags fam_envs new_fn_id fn_info wrap_dmds res_info rhs
- | is_thunk = splitThunk dflags fam_envs is_rec new_fn_id rhs
- -- See Note [Thunk splitting]
- | otherwise = return Nothing
- try <- doSplit
- case try of
- Nothing -> return $ [ (new_fn_id, rhs) ]
- Just binds -> checkSize dflags new_fn_id rhs binds
+ = return [ (new_fn_id, rhs) ]
where
fn_info = idInfo fn_id
@@ -291,29 +304,10 @@ tryWW dflags fam_envs is_rec fn_id rhs
is_fun = notNull wrap_dmds
is_thunk = not is_fun && not (exprIsHNF rhs)
----------------------
-checkSize :: DynFlags -> Id -> CoreExpr -> [(Id,CoreExpr)] -> UniqSM [(Id,CoreExpr)]
-checkSize dflags fn_id rhs thing_inside
- | isStableUnfolding (realIdUnfolding fn_id)
- = return [ (fn_id, rhs) ]
- -- See Note [Don't w/w INLINE things]
- -- and Note [Don't w/w INLINABLE things]
- -- NB: use realIdUnfolding because we want to see the unfolding
- -- even if it's a loop breaker!
-
- | certainlyWillInline dflags (idUnfolding fn_id)
- = return [ (fn_id `setIdUnfolding` inline_rule, rhs) ]
- -- Note [Don't w/w inline small non-loop-breaker things]
- -- NB: use idUnfolding because we don't want to apply
- -- this criterion to a loop breaker!
-
- | otherwise = return thing_inside
- where
- inline_rule = mkInlineUnfolding Nothing rhs
---------------------
splitFun :: DynFlags -> FamInstEnvs -> Id -> IdInfo -> [Demand] -> DmdResult -> CoreExpr
- -> UniqSM (Maybe [(Id, CoreExpr)])
+ -> UniqSM [(Id, CoreExpr)]
splitFun dflags fam_envs fn_id fn_info wrap_dmds res_info rhs
= WARN( not (wrap_dmds `lengthIs` arity), ppr fn_id <+> (ppr arity $$ ppr wrap_dmds $$ ppr res_info) ) do
-- The arity should match the signature
@@ -361,12 +355,11 @@ splitFun dflags fam_envs fn_id fn_info wrap_dmds res_info rhs
`setIdOccInfo` NoOccInfo
-- Zap any loop-breaker-ness, to avoid bleating from Lint
-- about a loop breaker with an INLINE rule
- return $ Just [(work_id, work_rhs), (wrap_id, wrap_rhs)]
+ return $ [(work_id, work_rhs), (wrap_id, wrap_rhs)]
-- Worker first, because wrapper mentions it
-- mkWwBodies has already built a wrap_rhs with an INLINE pragma wrapped around it
- Nothing ->
- return Nothing
+ Nothing -> return [(fn_id, rhs)]
where
fun_ty = idType fn_id
inl_prag = inlinePragInfo fn_info
@@ -452,11 +445,11 @@ then the splitting will go deeper too.
-- --> x = let x = e in
-- case x of (a,b) -> let x = (a,b) in x
-splitThunk :: DynFlags -> FamInstEnvs -> RecFlag -> Var -> Expr Var -> UniqSM (Maybe [(Var, Expr Var)])
-splitThunk dflags fam_envs is_rec fn_id rhs = do
- (useful,_, wrap_fn, work_fn) <- mkWWstr dflags fam_envs [fn_id]
- let res = [ (fn_id, Let (NonRec fn_id rhs) (wrap_fn (work_fn (Var fn_id)))) ]
- if useful then ASSERT2( isNonRec is_rec, ppr fn_id ) -- The thunk must be non-recursive
- return (Just res)
- else return Nothing
+splitThunk :: DynFlags -> FamInstEnvs -> RecFlag -> Var -> Expr Var -> UniqSM [(Var, Expr Var)]
+splitThunk dflags fam_envs is_rec fn_id rhs
+ = do { (useful,_, wrap_fn, work_fn) <- mkWWstr dflags fam_envs [fn_id]
+ ; let res = [ (fn_id, Let (NonRec fn_id rhs) (wrap_fn (work_fn (Var fn_id)))) ]
+ ; if useful then ASSERT2( isNonRec is_rec, ppr fn_id ) -- The thunk must be non-recursive
+ return res
+ else return [(fn_id, rhs)] }
\end{code}
diff --git a/compiler/stranal/WwLib.lhs b/compiler/stranal/WwLib.lhs
index 57937d696f45..7a9845b3d717 100644
--- a/compiler/stranal/WwLib.lhs
+++ b/compiler/stranal/WwLib.lhs
@@ -4,7 +4,11 @@
\section[WwLib]{A library for the ``worker\/wrapper'' back-end to the strictness analyser}
\begin{code}
-module WwLib ( mkWwBodies, mkWWstr, mkWorkerArgs, deepSplitProductType_maybe ) where
+{-# LANGUAGE CPP #-}
+
+module WwLib ( mkWwBodies, mkWWstr, mkWorkerArgs
+ , deepSplitProductType_maybe, findTypeShape
+ ) where
#include "HsVersions.h"
@@ -506,6 +510,12 @@ match the number of constructor arguments; this happened in Trac #8037.
If so, the worker/wrapper split doesn't work right and we get a Core Lint
bug. The fix here is simply to decline to do w/w if that happens.
+%************************************************************************
+%* *
+ Type scrutiny that is specific to demand analysis
+%* *
+%************************************************************************
+
\begin{code}
deepSplitProductType_maybe :: FamInstEnvs -> Type -> Maybe (DataCon, [Type], [Type], Coercion)
-- If deepSplitProductType_maybe ty = Just (dc, tys, arg_tys, co)
@@ -529,9 +539,32 @@ deepSplitCprType_maybe fam_envs con_tag ty
, Just (tc, tc_args) <- splitTyConApp_maybe ty1
, isDataTyCon tc
, let cons = tyConDataCons tc
- con = ASSERT( cons `lengthAtLeast` con_tag ) cons !! (con_tag - fIRST_TAG)
+ , cons `lengthAtLeast` con_tag -- This might not be true if we import the
+ -- type constructor via a .hs-boot file (#8743)
+ , let con = cons !! (con_tag - fIRST_TAG)
= Just (con, tc_args, dataConInstArgTys con tc_args, co)
deepSplitCprType_maybe _ _ _ = Nothing
+
+findTypeShape :: FamInstEnvs -> Type -> TypeShape
+-- Uncover the arrow and product shape of a type
+-- The data type TypeShape is defined in Demand
+-- See Note [Trimming a demand to a type] in Demand
+findTypeShape fam_envs ty
+ | Just (_, ty') <- splitForAllTy_maybe ty
+ = findTypeShape fam_envs ty'
+
+ | Just (tc, tc_args) <- splitTyConApp_maybe ty
+ , Just con <- isDataProductTyCon_maybe tc
+ = TsProd (map (findTypeShape fam_envs) $ dataConInstArgTys con tc_args)
+
+ | Just (_, res) <- splitFunTy_maybe ty
+ = TsFun (findTypeShape fam_envs res)
+
+ | Just (_, ty') <- topNormaliseType_maybe fam_envs ty
+ = findTypeShape fam_envs ty'
+
+ | otherwise
+ = TsUnk
\end{code}
@@ -701,7 +734,7 @@ mk_absent_let dflags arg
where
arg_ty = idType arg
abs_rhs = mkRuntimeErrorApp aBSENT_ERROR_ID arg_ty msg
- msg = showSDocDebug dflags (ppr arg <+> ppr (idType arg))
+ msg = showSDoc dflags (ppr arg <+> ppr (idType arg))
mk_seq_case :: Id -> CoreExpr -> CoreExpr
mk_seq_case arg body = Case (Var arg) (sanitiseCaseBndr arg) (exprType body) [(DEFAULT, [], body)]
diff --git a/compiler/typecheck/FamInst.lhs b/compiler/typecheck/FamInst.lhs
index 88212415c44b..d0b2d0da5a9c 100644
--- a/compiler/typecheck/FamInst.lhs
+++ b/compiler/typecheck/FamInst.lhs
@@ -1,8 +1,8 @@
The @FamInst@ type: family instance heads
\begin{code}
-{-# LANGUAGE GADTs #-}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, GADTs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -60,7 +60,7 @@ newFamInst :: FamFlavor -> CoAxiom Unbranched -> TcRnIf gbl lcl FamInst
-- Called from the vectoriser monad too, hence the rather general type
newFamInst flavor axiom@(CoAxiom { co_ax_branches = FirstBranch branch
, co_ax_tc = fam_tc })
- = do { (subst, tvs') <- tcInstSkolTyVarsLoc loc tvs
+ = do { (subst, tvs') <- tcInstSigTyVarsLoc loc tvs
; return (FamInst { fi_fam = fam_tc_name
, fi_flavor = flavor
, fi_tcs = roughMatchTcs lhs
@@ -217,9 +217,12 @@ tcLookupFamInst tycon tys
| otherwise
= do { instEnv <- tcGetFamInstEnvs
; let mb_match = lookupFamInstEnv instEnv tycon tys
- ; traceTc "lookupFamInst" ((ppr tycon <+> ppr tys) $$
- pprTvBndrs (varSetElems (tyVarsOfTypes tys)) $$
- ppr mb_match $$ ppr instEnv)
+ ; traceTc "lookupFamInst" $
+ vcat [ ppr tycon <+> ppr tys
+ , pprTvBndrs (varSetElems (tyVarsOfTypes tys))
+ , ppr mb_match
+ -- , ppr instEnv
+ ]
; case mb_match of
[] -> return Nothing
(match:_)
@@ -297,8 +300,11 @@ checkForConflicts :: FamInstEnvs -> FamInst -> TcM Bool
checkForConflicts inst_envs fam_inst
= do { let conflicts = lookupFamInstEnvConflicts inst_envs fam_inst
no_conflicts = null conflicts
- ; traceTc "checkForConflicts" (ppr (map fim_instance conflicts) $$
- ppr fam_inst $$ ppr inst_envs)
+ ; traceTc "checkForConflicts" $
+ vcat [ ppr (map fim_instance conflicts)
+ , ppr fam_inst
+ -- , ppr inst_envs
+ ]
; unless no_conflicts $ conflictInstErr fam_inst conflicts
; return no_conflicts }
diff --git a/compiler/typecheck/FunDeps.lhs b/compiler/typecheck/FunDeps.lhs
index 1dc96aa03721..5cfd22664ac4 100644
--- a/compiler/typecheck/FunDeps.lhs
+++ b/compiler/typecheck/FunDeps.lhs
@@ -8,6 +8,8 @@ FunDeps - functional dependencies
It's better to read it as: "if we know these, then we're going to know these"
\begin{code}
+{-# LANGUAGE CPP #-}
+
module FunDeps (
FDEq (..),
Equation(..), pprEquation,
@@ -26,8 +28,8 @@ import Unify
import InstEnv
import VarSet
import VarEnv
-import Maybes( firstJusts )
import Outputable
+import ErrUtils( Validity(..), allValid )
import Util
import FastString
@@ -415,7 +417,7 @@ makes instance inference go into a loop, because it requires the constraint
\begin{code}
checkInstCoverage :: Bool -- Be liberal
-> Class -> [PredType] -> [Type]
- -> Maybe SDoc
+ -> Validity
-- "be_liberal" flag says whether to use "liberal" coverage of
-- See Note [Coverage Condition] below
--
@@ -424,14 +426,14 @@ checkInstCoverage :: Bool -- Be liberal
-- Just msg => coverage problem described by msg
checkInstCoverage be_liberal clas theta inst_taus
- = firstJusts (map fundep_ok fds)
+ = allValid (map fundep_ok fds)
where
(tyvars, fds) = classTvsFds clas
fundep_ok fd
| if be_liberal then liberal_ok else conservative_ok
- = Nothing
+ = IsValid
| otherwise
- = Just msg
+ = NotValid msg
where
(ls,rs) = instFD fd tyvars inst_taus
ls_tvs = closeOverKinds (tyVarsOfTypes ls) -- See Note [Closing over kinds in coverage]
@@ -559,7 +561,7 @@ if s1 matches
\begin{code}
checkFunDeps :: (InstEnv, InstEnv) -> ClsInst
-> Maybe [ClsInst] -- Nothing <=> ok
- -- Just dfs <=> conflict with dfs
+ -- Just dfs <=> conflict with dfs
-- Check wheher adding DFunId would break functional-dependency constraints
-- Used only for instance decls defined in the module being compiled
checkFunDeps inst_envs ispec
diff --git a/compiler/typecheck/Inst.lhs b/compiler/typecheck/Inst.lhs
index e934984383a2..a27c0bd0f600 100644
--- a/compiler/typecheck/Inst.lhs
+++ b/compiler/typecheck/Inst.lhs
@@ -6,7 +6,8 @@
The @Inst@ type: dictionaries or method instances
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -48,7 +49,6 @@ import TcMType
import Type
import Coercion ( Role(..) )
import TcType
-import Unify
import HscTypes
import Id
import Name
@@ -59,9 +59,9 @@ import PrelNames
import SrcLoc
import DynFlags
import Bag
-import Maybes
import Util
import Outputable
+import Control.Monad( unless )
import Data.List( mapAccumL )
\end{code}
@@ -382,14 +382,15 @@ syntaxNameCtxt name orig ty tidy_env
\begin{code}
getOverlapFlag :: TcM OverlapFlag
-getOverlapFlag
+getOverlapFlag
= do { dflags <- getDynFlags
; let overlap_ok = xopt Opt_OverlappingInstances dflags
incoherent_ok = xopt Opt_IncoherentInstances dflags
- safeOverlap = safeLanguageOn dflags
- overlap_flag | incoherent_ok = Incoherent safeOverlap
- | overlap_ok = OverlapOk safeOverlap
- | otherwise = NoOverlap safeOverlap
+ use x = OverlapFlag { isSafeOverlap = safeLanguageOn dflags
+ , overlapMode = x }
+ overlap_flag | incoherent_ok = use Incoherent
+ | overlap_ok = use Overlaps
+ | otherwise = use NoOverlap
; return overlap_flag }
@@ -408,22 +409,24 @@ tcExtendLocalInstEnv :: [ClsInst] -> TcM a -> TcM a
tcExtendLocalInstEnv dfuns thing_inside
= do { traceDFuns dfuns
; env <- getGblEnv
- ; inst_env' <- foldlM addLocalInst (tcg_inst_env env) dfuns
- ; let env' = env { tcg_insts = dfuns ++ tcg_insts env,
- tcg_inst_env = inst_env' }
+ ; (inst_env', cls_insts') <- foldlM addLocalInst
+ (tcg_inst_env env, tcg_insts env)
+ dfuns
+ ; let env' = env { tcg_insts = cls_insts'
+ , tcg_inst_env = inst_env' }
; setGblEnv env' thing_inside }
-addLocalInst :: InstEnv -> ClsInst -> TcM InstEnv
--- Check that the proposed new instance is OK,
+addLocalInst :: (InstEnv, [ClsInst]) -> ClsInst -> TcM (InstEnv, [ClsInst])
+-- Check that the proposed new instance is OK,
-- and then add it to the home inst env
-- If overwrite_inst, then we can overwrite a direct match
-addLocalInst home_ie ispec
+addLocalInst (home_ie, my_insts) ispec
= do {
-- Instantiate the dfun type so that we extend the instance
-- envt with completely fresh template variables
-- This is important because the template variables must
-- not overlap with anything in the things being looked up
- -- (since we do unification).
+ -- (since we do unification).
--
-- We use tcInstSkolType because we don't want to allocate fresh
-- *meta* type variables.
@@ -436,9 +439,23 @@ addLocalInst home_ie ispec
-- Load imported instances, so that we report
-- duplicates correctly
- eps <- getEps
- ; let inst_envs = (eps_inst_env eps, home_ie)
- (tvs, cls, tys) = instanceHead ispec
+
+ -- 'matches' are existing instance declarations that are less
+ -- specific than the new one
+ -- 'dups' are those 'matches' that are equal to the new one
+ ; isGHCi <- getIsGHCi
+ ; eps <- getEps
+ ; let (home_ie', my_insts')
+ | isGHCi = ( deleteFromInstEnv home_ie ispec
+ , filterOut (identicalInstHead ispec) my_insts)
+ | otherwise = (home_ie, my_insts)
+ -- If there is a home-package duplicate instance,
+ -- silently delete it
+
+ (_tvs, cls, tys) = instanceHead ispec
+ inst_envs = (eps_inst_env eps, home_ie')
+ (matches, _, _) = lookupInstEnv inst_envs cls tys
+ dups = filter (identicalInstHead ispec) (map fst matches)
-- Check functional dependencies
; case checkFunDeps inst_envs ispec of
@@ -446,37 +463,17 @@ addLocalInst home_ie ispec
Nothing -> return ()
-- Check for duplicate instance decls
- ; let (matches, unifs, _) = lookupInstEnv inst_envs cls tys
- dup_ispecs = [ dup_ispec
- | (dup_ispec, _) <- matches
- , let dup_tys = is_tys dup_ispec
- , isJust (tcMatchTys (mkVarSet tvs) tys dup_tys)]
-
- -- Find memebers of the match list which ispec itself matches.
- -- If the match is 2-way, it's a duplicate
- -- If it's a duplicate, but we can overwrite home package dups, then overwrite
- ; isGHCi <- getIsGHCi
- ; overlapFlag <- getOverlapFlag
- ; case isGHCi of
- False -> case dup_ispecs of
- dup : _ -> dupInstErr ispec dup >> return (extendInstEnv home_ie ispec)
- [] -> return (extendInstEnv home_ie ispec)
- True -> case (dup_ispecs, home_ie_matches, unifs, overlapFlag) of
- (_, _:_, _, _) -> return (overwriteInstEnv home_ie ispec)
- (dup:_, [], _, _) -> dupInstErr ispec dup >> return (extendInstEnv home_ie ispec)
- ([], _, u:_, NoOverlap _) -> overlappingInstErr ispec u >> return (extendInstEnv home_ie ispec)
- _ -> return (extendInstEnv home_ie ispec)
- where (homematches, _) = lookupInstEnv' home_ie cls tys
- home_ie_matches = [ dup_ispec
- | (dup_ispec, _) <- homematches
- , let dup_tys = is_tys dup_ispec
- , isJust (tcMatchTys (mkVarSet tvs) tys dup_tys)] }
+ ; unless (null dups) $
+ dupInstErr ispec (head dups)
+
+ ; return (extendInstEnv home_ie' ispec, ispec:my_insts') }
traceDFuns :: [ClsInst] -> TcRn ()
traceDFuns ispecs
= traceTc "Adding instances:" (vcat (map pp ispecs))
where
- pp ispec = ppr (instanceDFunId ispec) <+> colon <+> ppr ispec
+ pp ispec = hang (ppr (instanceDFunId ispec) <+> colon)
+ 2 (ppr ispec)
-- Print the dfun name itself too
funDepErr :: ClsInst -> [ClsInst] -> TcRn ()
@@ -489,11 +486,6 @@ dupInstErr ispec dup_ispec
= addClsInstsErr (ptext (sLit "Duplicate instance declarations:"))
[ispec, dup_ispec]
-overlappingInstErr :: ClsInst -> ClsInst -> TcRn ()
-overlappingInstErr ispec dup_ispec
- = addClsInstsErr (ptext (sLit "Overlapping instance declarations:"))
- [ispec, dup_ispec]
-
addClsInstsErr :: SDoc -> [ClsInst] -> TcRn ()
addClsInstsErr herald ispecs
= setSrcSpan (getSrcSpan (head sorted)) $
diff --git a/compiler/typecheck/TcAnnotations.lhs b/compiler/typecheck/TcAnnotations.lhs
index e12552f419b8..cbd19cf8f33f 100644
--- a/compiler/typecheck/TcAnnotations.lhs
+++ b/compiler/typecheck/TcAnnotations.lhs
@@ -5,6 +5,8 @@
\section[TcAnnotations]{Typechecking annotations}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcAnnotations ( tcAnnotations, annCtxt ) where
#ifdef GHCI
diff --git a/compiler/typecheck/TcArrows.lhs b/compiler/typecheck/TcArrows.lhs
index b427dd5409eb..eab894195666 100644
--- a/compiler/typecheck/TcArrows.lhs
+++ b/compiler/typecheck/TcArrows.lhs
@@ -5,16 +5,11 @@
Typecheck arrow notation
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
+{-# LANGUAGE RankNTypes #-}
module TcArrows ( tcProc ) where
-import {-# SOURCE #-} TcExpr( tcMonoExpr, tcInferRho, tcSyntaxOp, tcCheckId, tcPolyExpr )
+import {-# SOURCE #-} TcExpr( tcMonoExpr, tcInferRho, tcSyntaxOp, tcCheckId, tcPolyExpr )
import HsSyn
import TcMatches
@@ -77,32 +72,32 @@ Note that
%************************************************************************
-%* *
- Proc
-%* *
+%* *
+ Proc
+%* *
%************************************************************************
\begin{code}
-tcProc :: InPat Name -> LHsCmdTop Name -- proc pat -> expr
- -> TcRhoType -- Expected type of whole proc expression
+tcProc :: InPat Name -> LHsCmdTop Name -- proc pat -> expr
+ -> TcRhoType -- Expected type of whole proc expression
-> TcM (OutPat TcId, LHsCmdTop TcId, TcCoercion)
tcProc pat cmd exp_ty
= newArrowScope $
- do { (co, (exp_ty1, res_ty)) <- matchExpectedAppTy exp_ty
- ; (co1, (arr_ty, arg_ty)) <- matchExpectedAppTy exp_ty1
- ; let cmd_env = CmdEnv { cmd_arr = arr_ty }
+ do { (co, (exp_ty1, res_ty)) <- matchExpectedAppTy exp_ty
+ ; (co1, (arr_ty, arg_ty)) <- matchExpectedAppTy exp_ty1
+ ; let cmd_env = CmdEnv { cmd_arr = arr_ty }
; (pat', cmd') <- tcPat ProcExpr pat arg_ty $
- tcCmdTop cmd_env cmd (unitTy, res_ty)
+ tcCmdTop cmd_env cmd (unitTy, res_ty)
; let res_co = mkTcTransCo co (mkTcAppCo co1 (mkTcNomReflCo res_ty))
; return (pat', cmd', res_co) }
\end{code}
%************************************************************************
-%* *
- Commands
-%* *
+%* *
+ Commands
+%* *
%************************************************************************
\begin{code}
@@ -112,7 +107,7 @@ type CmdArgType = TcTauType -- carg_type, a nested tuple
data CmdEnv
= CmdEnv {
- cmd_arr :: TcType -- arrow type constructor, of kind *->*->*
+ cmd_arr :: TcType -- arrow type constructor, of kind *->*->*
}
mkCmdArrTy :: CmdEnv -> TcTauType -> TcTauType -> TcTauType
@@ -126,27 +121,27 @@ tcCmdTop :: CmdEnv
tcCmdTop env (L loc (HsCmdTop cmd _ _ names)) cmd_ty@(cmd_stk, res_ty)
= setSrcSpan loc $
- do { cmd' <- tcCmd env cmd cmd_ty
- ; names' <- mapM (tcSyntaxName ProcOrigin (cmd_arr env)) names
- ; return (L loc $ HsCmdTop cmd' cmd_stk res_ty names') }
+ do { cmd' <- tcCmd env cmd cmd_ty
+ ; names' <- mapM (tcSyntaxName ProcOrigin (cmd_arr env)) names
+ ; return (L loc $ HsCmdTop cmd' cmd_stk res_ty names') }
----------------------------------------
tcCmd :: CmdEnv -> LHsCmd Name -> CmdType -> TcM (LHsCmd TcId)
- -- The main recursive function
+ -- The main recursive function
tcCmd env (L loc cmd) res_ty
= setSrcSpan loc $ do
- { cmd' <- tc_cmd env cmd res_ty
- ; return (L loc cmd') }
+ { cmd' <- tc_cmd env cmd res_ty
+ ; return (L loc cmd') }
tc_cmd :: CmdEnv -> HsCmd Name -> CmdType -> TcM (HsCmd TcId)
tc_cmd env (HsCmdPar cmd) res_ty
- = do { cmd' <- tcCmd env cmd res_ty
- ; return (HsCmdPar cmd') }
+ = do { cmd' <- tcCmd env cmd res_ty
+ ; return (HsCmdPar cmd') }
tc_cmd env (HsCmdLet binds (L body_loc body)) res_ty
- = do { (binds', body') <- tcLocalBinds binds $
- setSrcSpan body_loc $
- tc_cmd env body res_ty
- ; return (HsCmdLet binds' (L body_loc body')) }
+ = do { (binds', body') <- tcLocalBinds binds $
+ setSrcSpan body_loc $
+ tc_cmd env body res_ty
+ ; return (HsCmdLet binds' (L body_loc body')) }
tc_cmd env in_cmd@(HsCmdCase scrut matches) (stk, res_ty)
= addErrCtxt (cmdCtxt in_cmd) $ do
@@ -166,25 +161,25 @@ tc_cmd env (HsCmdIf Nothing pred b1 b2) res_ty -- Ordinary 'if'
}
tc_cmd env (HsCmdIf (Just fun) pred b1 b2) res_ty -- Rebindable syntax for if
- = do { pred_ty <- newFlexiTyVarTy openTypeKind
+ = do { pred_ty <- newFlexiTyVarTy openTypeKind
-- For arrows, need ifThenElse :: forall r. T -> r -> r -> r
-- because we're going to apply it to the environment, not
-- the return value.
; (_, [r_tv]) <- tcInstSkolTyVars [alphaTyVar]
- ; let r_ty = mkTyVarTy r_tv
+ ; let r_ty = mkTyVarTy r_tv
; let if_ty = mkFunTys [pred_ty, r_ty, r_ty] r_ty
; checkTc (not (r_tv `elemVarSet` tyVarsOfType pred_ty))
(ptext (sLit "Predicate type of `ifThenElse' depends on result type"))
- ; fun' <- tcSyntaxOp IfOrigin fun if_ty
- ; pred' <- tcMonoExpr pred pred_ty
- ; b1' <- tcCmd env b1 res_ty
- ; b2' <- tcCmd env b2 res_ty
+ ; fun' <- tcSyntaxOp IfOrigin fun if_ty
+ ; pred' <- tcMonoExpr pred pred_ty
+ ; b1' <- tcCmd env b1 res_ty
+ ; b2' <- tcCmd env b2 res_ty
; return (HsCmdIf (Just fun') pred' b1' b2')
}
-------------------------------------------
--- Arrow application
--- (f -< a) or (f -<< a)
+-- Arrow application
+-- (f -< a) or (f -<< a)
--
-- D |- fun :: a t1 t2
-- D,G |- arg :: t1
@@ -199,16 +194,16 @@ tc_cmd env (HsCmdIf (Just fun) pred b1 b2) res_ty -- Rebindable syntax for if
-- (plus -<< requires ArrowApply)
tc_cmd env cmd@(HsCmdArrApp fun arg _ ho_app lr) (_, res_ty)
- = addErrCtxt (cmdCtxt cmd) $
+ = addErrCtxt (cmdCtxt cmd) $
do { arg_ty <- newFlexiTyVarTy openTypeKind
- ; let fun_ty = mkCmdArrTy env arg_ty res_ty
- ; fun' <- select_arrow_scope (tcMonoExpr fun fun_ty)
+ ; let fun_ty = mkCmdArrTy env arg_ty res_ty
+ ; fun' <- select_arrow_scope (tcMonoExpr fun fun_ty)
-- ToDo: There should be no need for the escapeArrowScope stuff
-- See Note [Escaping the arrow scope] in TcRnTypes
- ; arg' <- tcMonoExpr arg arg_ty
+ ; arg' <- tcMonoExpr arg arg_ty
- ; return (HsCmdArrApp fun' arg' fun_ty ho_app lr) }
+ ; return (HsCmdArrApp fun' arg' fun_ty ho_app lr) }
where
-- Before type-checking f, use the environment of the enclosing
-- proc for the (-<) case.
@@ -219,7 +214,7 @@ tc_cmd env cmd@(HsCmdArrApp fun arg _ ho_app lr) (_, res_ty)
HsFirstOrderApp -> escapeArrowScope tc
-------------------------------------------
--- Command application
+-- Command application
--
-- D,G |- exp : t
-- D;G |-a cmd : (t,stk) --> res
@@ -227,76 +222,76 @@ tc_cmd env cmd@(HsCmdArrApp fun arg _ ho_app lr) (_, res_ty)
-- D;G |-a cmd exp : stk --> res
tc_cmd env cmd@(HsCmdApp fun arg) (cmd_stk, res_ty)
- = addErrCtxt (cmdCtxt cmd) $
+ = addErrCtxt (cmdCtxt cmd) $
do { arg_ty <- newFlexiTyVarTy openTypeKind
- ; fun' <- tcCmd env fun (mkPairTy arg_ty cmd_stk, res_ty)
- ; arg' <- tcMonoExpr arg arg_ty
- ; return (HsCmdApp fun' arg') }
+ ; fun' <- tcCmd env fun (mkPairTy arg_ty cmd_stk, res_ty)
+ ; arg' <- tcMonoExpr arg arg_ty
+ ; return (HsCmdApp fun' arg') }
-------------------------------------------
--- Lambda
+-- Lambda
--
-- D;G,x:t |-a cmd : stk --> res
-- ------------------------------
-- D;G |-a (\x.cmd) : (t,stk) --> res
tc_cmd env
- (HsCmdLam (MG { mg_alts = [L mtch_loc (match@(Match pats _maybe_rhs_sig grhss))] }))
+ (HsCmdLam (MG { mg_alts = [L mtch_loc (match@(Match pats _maybe_rhs_sig grhss))], mg_origin = origin }))
(cmd_stk, res_ty)
- = addErrCtxt (pprMatchInCtxt match_ctxt match) $
- do { (co, arg_tys, cmd_stk') <- matchExpectedCmdArgs n_pats cmd_stk
+ = addErrCtxt (pprMatchInCtxt match_ctxt match) $
+ do { (co, arg_tys, cmd_stk') <- matchExpectedCmdArgs n_pats cmd_stk
- -- Check the patterns, and the GRHSs inside
- ; (pats', grhss') <- setSrcSpan mtch_loc $
+ -- Check the patterns, and the GRHSs inside
+ ; (pats', grhss') <- setSrcSpan mtch_loc $
tcPats LambdaExpr pats arg_tys $
tc_grhss grhss cmd_stk' res_ty
- ; let match' = L mtch_loc (Match pats' Nothing grhss')
+ ; let match' = L mtch_loc (Match pats' Nothing grhss')
arg_tys = map hsLPatType pats'
cmd' = HsCmdLam (MG { mg_alts = [match'], mg_arg_tys = arg_tys
- , mg_res_ty = res_ty })
- ; return (mkHsCmdCast co cmd') }
+ , mg_res_ty = res_ty, mg_origin = origin })
+ ; return (mkHsCmdCast co cmd') }
where
n_pats = length pats
- match_ctxt = (LambdaExpr :: HsMatchContext Name) -- Maybe KappaExpr?
+ match_ctxt = (LambdaExpr :: HsMatchContext Name) -- Maybe KappaExpr?
pg_ctxt = PatGuard match_ctxt
tc_grhss (GRHSs grhss binds) stk_ty res_ty
- = do { (binds', grhss') <- tcLocalBinds binds $
- mapM (wrapLocM (tc_grhs stk_ty res_ty)) grhss
- ; return (GRHSs grhss' binds') }
+ = do { (binds', grhss') <- tcLocalBinds binds $
+ mapM (wrapLocM (tc_grhs stk_ty res_ty)) grhss
+ ; return (GRHSs grhss' binds') }
tc_grhs stk_ty res_ty (GRHS guards body)
- = do { (guards', rhs') <- tcStmtsAndThen pg_ctxt tcGuardStmt guards res_ty $
- \ res_ty -> tcCmd env body (stk_ty, res_ty)
- ; return (GRHS guards' rhs') }
+ = do { (guards', rhs') <- tcStmtsAndThen pg_ctxt tcGuardStmt guards res_ty $
+ \ res_ty -> tcCmd env body (stk_ty, res_ty)
+ ; return (GRHS guards' rhs') }
-------------------------------------------
--- Do notation
+-- Do notation
tc_cmd env (HsCmdDo stmts _) (cmd_stk, res_ty)
- = do { co <- unifyType unitTy cmd_stk -- Expecting empty argument stack
- ; stmts' <- tcStmts ArrowExpr (tcArrDoStmt env) stmts res_ty
- ; return (mkHsCmdCast co (HsCmdDo stmts' res_ty)) }
+ = do { co <- unifyType unitTy cmd_stk -- Expecting empty argument stack
+ ; stmts' <- tcStmts ArrowExpr (tcArrDoStmt env) stmts res_ty
+ ; return (mkHsCmdCast co (HsCmdDo stmts' res_ty)) }
-----------------------------------------------------------------
--- Arrow ``forms'' (| e c1 .. cn |)
+-- Arrow ``forms'' (| e c1 .. cn |)
--
--- D; G |-a1 c1 : stk1 --> r1
--- ...
--- D; G |-an cn : stkn --> rn
--- D |- e :: forall e. a1 (e, stk1) t1
+-- D; G |-a1 c1 : stk1 --> r1
+-- ...
+-- D; G |-an cn : stkn --> rn
+-- D |- e :: forall e. a1 (e, stk1) t1
-- ...
-- -> an (e, stkn) tn
-- -> a (e, stk) t
--- e \not\in (stk, stk1, ..., stkm, t, t1, ..., tn)
--- ----------------------------------------------
--- D; G |-a (| e c1 ... cn |) : stk --> t
+-- e \not\in (stk, stk1, ..., stkm, t, t1, ..., tn)
+-- ----------------------------------------------
+-- D; G |-a (| e c1 ... cn |) : stk --> t
-tc_cmd env cmd@(HsCmdArrForm expr fixity cmd_args) (cmd_stk, res_ty)
- = addErrCtxt (cmdCtxt cmd) $
- do { (cmd_args', cmd_tys) <- mapAndUnzipM tc_cmd_arg cmd_args
+tc_cmd env cmd@(HsCmdArrForm expr fixity cmd_args) (cmd_stk, res_ty)
+ = addErrCtxt (cmdCtxt cmd) $
+ do { (cmd_args', cmd_tys) <- mapAndUnzipM tc_cmd_arg cmd_args
; let e_ty = mkForAllTy alphaTyVar $ -- We use alphaTyVar for 'w'
mkFunTys cmd_tys $
mkCmdArrTy env (mkPairTy alphaTy cmd_stk) res_ty
@@ -307,19 +302,19 @@ tc_cmd env cmd@(HsCmdArrForm expr fixity cmd_args) (cmd_stk, res_ty)
tc_cmd_arg :: LHsCmdTop Name -> TcM (LHsCmdTop TcId, TcType)
tc_cmd_arg cmd
= do { arr_ty <- newFlexiTyVarTy arrowTyConKind
- ; stk_ty <- newFlexiTyVarTy liftedTypeKind
- ; res_ty <- newFlexiTyVarTy liftedTypeKind
- ; let env' = env { cmd_arr = arr_ty }
- ; cmd' <- tcCmdTop env' cmd (stk_ty, res_ty)
- ; return (cmd', mkCmdArrTy env' (mkPairTy alphaTy stk_ty) res_ty) }
+ ; stk_ty <- newFlexiTyVarTy liftedTypeKind
+ ; res_ty <- newFlexiTyVarTy liftedTypeKind
+ ; let env' = env { cmd_arr = arr_ty }
+ ; cmd' <- tcCmdTop env' cmd (stk_ty, res_ty)
+ ; return (cmd', mkCmdArrTy env' (mkPairTy alphaTy stk_ty) res_ty) }
-----------------------------------------------------------------
--- Base case for illegal commands
+-- Base case for illegal commands
-- This is where expressions that aren't commands get rejected
tc_cmd _ cmd _
= failWithTc (vcat [ptext (sLit "The expression"), nest 2 (ppr cmd),
- ptext (sLit "was found where an arrow command was expected")])
+ ptext (sLit "was found where an arrow command was expected")])
matchExpectedCmdArgs :: Arity -> TcType -> TcM (TcCoercion, [TcType], TcType)
@@ -333,34 +328,34 @@ matchExpectedCmdArgs n ty
%************************************************************************
-%* *
- Stmts
-%* *
+%* *
+ Stmts
+%* *
%************************************************************************
\begin{code}
--------------------------------
--- Mdo-notation
+-- Mdo-notation
-- The distinctive features here are
--- (a) RecStmts, and
--- (b) no rebindable syntax
+-- (a) RecStmts, and
+-- (b) no rebindable syntax
tcArrDoStmt :: CmdEnv -> TcCmdStmtChecker
tcArrDoStmt env _ (LastStmt rhs _) res_ty thing_inside
- = do { rhs' <- tcCmd env rhs (unitTy, res_ty)
- ; thing <- thing_inside (panic "tcArrDoStmt")
- ; return (LastStmt rhs' noSyntaxExpr, thing) }
+ = do { rhs' <- tcCmd env rhs (unitTy, res_ty)
+ ; thing <- thing_inside (panic "tcArrDoStmt")
+ ; return (LastStmt rhs' noSyntaxExpr, thing) }
tcArrDoStmt env _ (BodyStmt rhs _ _ _) res_ty thing_inside
- = do { (rhs', elt_ty) <- tc_arr_rhs env rhs
- ; thing <- thing_inside res_ty
- ; return (BodyStmt rhs' noSyntaxExpr noSyntaxExpr elt_ty, thing) }
+ = do { (rhs', elt_ty) <- tc_arr_rhs env rhs
+ ; thing <- thing_inside res_ty
+ ; return (BodyStmt rhs' noSyntaxExpr noSyntaxExpr elt_ty, thing) }
tcArrDoStmt env ctxt (BindStmt pat rhs _ _) res_ty thing_inside
- = do { (rhs', pat_ty) <- tc_arr_rhs env rhs
- ; (pat', thing) <- tcPat (StmtCtxt ctxt) pat pat_ty $
+ = do { (rhs', pat_ty) <- tc_arr_rhs env rhs
+ ; (pat', thing) <- tcPat (StmtCtxt ctxt) pat pat_ty $
thing_inside res_ty
- ; return (BindStmt pat' rhs' noSyntaxExpr noSyntaxExpr, thing) }
+ ; return (BindStmt pat' rhs' noSyntaxExpr noSyntaxExpr, thing) }
tcArrDoStmt env ctxt (RecStmt { recS_stmts = stmts, recS_later_ids = later_names
, recS_rec_ids = rec_names }) res_ty thing_inside
@@ -369,15 +364,15 @@ tcArrDoStmt env ctxt (RecStmt { recS_stmts = stmts, recS_later_ids = later_names
; let tup_ids = zipWith mkLocalId tup_names tup_elt_tys
; tcExtendIdEnv tup_ids $ do
{ (stmts', tup_rets)
- <- tcStmtsAndThen ctxt (tcArrDoStmt env) stmts res_ty $ \ _res_ty' ->
- -- ToDo: res_ty not really right
+ <- tcStmtsAndThen ctxt (tcArrDoStmt env) stmts res_ty $ \ _res_ty' ->
+ -- ToDo: res_ty not really right
zipWithM tcCheckId tup_names tup_elt_tys
; thing <- thing_inside res_ty
- -- NB: The rec_ids for the recursive things
- -- already scope over this part. This binding may shadow
- -- some of them with polymorphic things with the same Name
- -- (see note [RecStmt] in HsExpr)
+ -- NB: The rec_ids for the recursive things
+ -- already scope over this part. This binding may shadow
+ -- some of them with polymorphic things with the same Name
+ -- (see note [RecStmt] in HsExpr)
; let rec_ids = takeList rec_names tup_ids
; later_ids <- tcLookupLocalIds later_names
@@ -390,22 +385,22 @@ tcArrDoStmt env ctxt (RecStmt { recS_stmts = stmts, recS_later_ids = later_names
, recS_later_rets = later_rets
, recS_rec_ids = rec_ids, recS_rec_rets = rec_rets
, recS_ret_ty = res_ty }, thing)
- }}
+ }}
tcArrDoStmt _ _ stmt _ _
= pprPanic "tcArrDoStmt: unexpected Stmt" (ppr stmt)
tc_arr_rhs :: CmdEnv -> LHsCmd Name -> TcM (LHsCmd TcId, TcType)
tc_arr_rhs env rhs = do { ty <- newFlexiTyVarTy liftedTypeKind
- ; rhs' <- tcCmd env rhs (unitTy, ty)
- ; return (rhs', ty) }
+ ; rhs' <- tcCmd env rhs (unitTy, ty)
+ ; return (rhs', ty) }
\end{code}
%************************************************************************
-%* *
- Helpers
-%* *
+%* *
+ Helpers
+%* *
%************************************************************************
@@ -413,15 +408,15 @@ tc_arr_rhs env rhs = do { ty <- newFlexiTyVarTy liftedTypeKind
mkPairTy :: Type -> Type -> Type
mkPairTy t1 t2 = mkTyConApp pairTyCon [t1,t2]
-arrowTyConKind :: Kind -- *->*->*
+arrowTyConKind :: Kind -- *->*->*
arrowTyConKind = mkArrowKinds [liftedTypeKind, liftedTypeKind] liftedTypeKind
\end{code}
%************************************************************************
-%* *
- Errors
-%* *
+%* *
+ Errors
+%* *
%************************************************************************
\begin{code}
diff --git a/compiler/typecheck/TcBinds.lhs b/compiler/typecheck/TcBinds.lhs
index 47d45ae3187e..34db200ab6d7 100644
--- a/compiler/typecheck/TcBinds.lhs
+++ b/compiler/typecheck/TcBinds.lhs
@@ -5,16 +5,18 @@
\section[TcBinds]{TcBinds}
\begin{code}
+{-# LANGUAGE CPP, RankNTypes, ScopedTypeVariables #-}
+
module TcBinds ( tcLocalBinds, tcTopBinds, tcRecSelBinds,
tcHsBootSigs, tcPolyCheck,
PragFun, tcSpecPrags, tcVectDecls, mkPragFun,
TcSigInfo(..), TcSigFun,
- instTcTySig, instTcTySigFromId,
+ instTcTySig, instTcTySigFromId, findScopedTyVars,
badBootDeclErr ) where
import {-# SOURCE #-} TcMatches ( tcGRHSsPat, tcMatchesFun )
import {-# SOURCE #-} TcExpr ( tcMonoExpr )
-import {-# SOURCE #-} TcPatSyn ( tcPatSynDecl )
+import {-# SOURCE #-} TcPatSyn ( tcPatSynDecl, tcPatSynWrapper )
import DynFlags
import HsSyn
@@ -37,6 +39,7 @@ import TysPrim
import Id
import Var
import VarSet
+import VarEnv( TidyEnv )
import Module
import Name
import NameSet
@@ -54,6 +57,7 @@ import FastString
import Type(mkStrLitTy)
import Class(classTyCon)
import PrelNames(ipClassName)
+import TcValidity (checkValidType)
import Control.Monad
@@ -270,6 +274,30 @@ time by defaulting. No no no.
However [Oct 10] this is all handled automatically by the
untouchable-range idea.
+Note [Placeholder PatSyn kinds]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Consider this (Trac #9161)
+
+ {-# LANGUAGE PatternSynonyms, DataKinds #-}
+ pattern A = ()
+ b :: A
+ b = undefined
+
+Here, the type signature for b mentions A. But A is a pattern
+synonym, which is typechecked (for very good reasons; a view pattern
+in the RHS may mention a value binding) as part of a group of
+bindings. It is entirely reasonable to reject this, but to do so
+we need A to be in the kind environment when kind-checking the signature for B.
+
+Hence the tcExtendKindEnv2 patsyn_placeholder_kinds, which adds a binding
+ A -> AGlobal (AConLike (PatSynCon _|_))
+to the environment. Then TcHsType.tcTyVar will find A in the kind environment,
+and will give a 'wrongThingErr' as a result. But the lookup of A won't fail.
+
+The _|_ (= panic "fakePatSynCon") works because the wrongThingErr call, in
+tcTyVar, doesn't look inside the TcTyThing.
+
+
\begin{code}
tcValBinds :: TopLevelFlag
-> [(RecFlag, LHsBinds Name)] -> [LSig Name]
@@ -277,19 +305,31 @@ tcValBinds :: TopLevelFlag
-> TcM ([(RecFlag, LHsBinds TcId)], thing)
tcValBinds top_lvl binds sigs thing_inside
- = do { -- Typecheck the signature
- (poly_ids, sig_fn) <- tcTySigs sigs
+ = do { -- Typecheck the signature
+ ; (poly_ids, sig_fn) <- tcExtendKindEnv2 patsyn_placeholder_kinds $
+ -- See Note [Placeholder PatSyn kinds]
+ tcTySigs sigs
; let prag_fn = mkPragFun sigs (foldr (unionBags . snd) emptyBag binds)
-- Extend the envt right away with all
-- the Ids declared with type signatures
-- Use tcExtendIdEnv2 to avoid extending the TcIdBinder stack
- ; (binds', thing) <- tcExtendIdEnv2 [(idName id, id) | id <- poly_ids] $
- tcBindGroups top_lvl sig_fn prag_fn
- binds thing_inside
-
- ; return (binds', thing) }
+ ; tcExtendIdEnv2 [(idName id, id) | id <- poly_ids] $ do
+ { (binds', (extra_binds', thing)) <- tcBindGroups top_lvl sig_fn prag_fn binds $ do
+ { thing <- thing_inside
+ -- See Note [Pattern synonym wrappers don't yield dependencies]
+ ; patsyn_wrappers <- mapM tcPatSynWrapper patsyns
+ ; let extra_binds = [ (NonRecursive, wrapper) | wrapper <- patsyn_wrappers ]
+ ; return (extra_binds, thing) }
+ ; return (binds' ++ extra_binds', thing) }}
+ where
+ patsyns
+ = [psb | (_, lbinds) <- binds, L _ (PatSynBind psb) <- bagToList lbinds]
+ patsyn_placeholder_kinds -- See Note [Placeholder PatSyn kinds]
+ = [(name, placeholder_patsyn_tything)| PSB{ psb_id = L _ name } <- patsyns ]
+ placeholder_patsyn_tything
+ = AGlobal $ AConLike $ PatSynCon $ panic "fakePatSynCon"
------------------------
tcBindGroups :: TopLevelFlag -> TcSigFun -> PragFun
@@ -345,14 +385,14 @@ tc_group top_lvl sig_fn prag_fn (Recursive, binds) thing_inside
; return ([(Recursive, binds1)], thing) }
-- Rec them all together
where
- hasPatSyn = anyBag (isPatSyn . unLoc . snd) binds
+ hasPatSyn = anyBag (isPatSyn . unLoc) binds
isPatSyn PatSynBind{} = True
isPatSyn _ = False
- sccs :: [SCC (Origin, LHsBind Name)]
+ sccs :: [SCC (LHsBind Name)]
sccs = stronglyConnCompFromEdgedVertices (mkEdges sig_fn binds)
- go :: [SCC (Origin, LHsBind Name)] -> TcM (LHsBinds TcId, [TcId], thing)
+ go :: [SCC (LHsBind Name)] -> TcM (LHsBinds TcId, [TcId], thing)
go (scc:sccs) = do { (binds1, ids1, closed) <- tc_scc scc
; (binds2, ids2, thing) <- tcExtendLetEnv top_lvl closed ids1 $
go sccs
@@ -368,7 +408,7 @@ recursivePatSynErr :: OutputableBndr name => LHsBinds name -> TcM a
recursivePatSynErr binds
= failWithTc $
hang (ptext (sLit "Recursive pattern synonym definition with following bindings:"))
- 2 (vcat $ map (pprLBind . snd) . bagToList $ binds)
+ 2 (vcat $ map pprLBind . bagToList $ binds)
where
pprLoc loc = parens (ptext (sLit "defined at") <+> ppr loc)
pprLBind (L loc bind) = pprWithCommas ppr (collectHsBindBinders bind) <+>
@@ -376,11 +416,10 @@ recursivePatSynErr binds
tc_single :: forall thing.
TopLevelFlag -> TcSigFun -> PragFun
- -> (Origin, LHsBind Name) -> TcM thing
+ -> LHsBind Name -> TcM thing
-> TcM (LHsBinds TcId, thing)
-tc_single _top_lvl _sig_fn _prag_fn (_, (L _ ps@PatSynBind{})) thing_inside
- = do { (pat_syn, aux_binds) <-
- tcPatSynDecl (patsyn_id ps) (patsyn_args ps) (patsyn_def ps) (patsyn_dir ps)
+tc_single _top_lvl _sig_fn _prag_fn (L _ (PatSynBind psb)) thing_inside
+ = do { (pat_syn, aux_binds) <- tcPatSynDecl psb
; let tything = AConLike (PatSynCon pat_syn)
implicit_ids = (patSynMatcher pat_syn) :
@@ -400,12 +439,12 @@ tc_single top_lvl sig_fn prag_fn lbind thing_inside
------------------------
mkEdges :: TcSigFun -> LHsBinds Name
- -> [((Origin, LHsBind Name), BKey, [BKey])]
+ -> [(LHsBind Name, BKey, [BKey])]
type BKey = Int -- Just number off the bindings
mkEdges sig_fn binds
- = [ (bind, key, [key | n <- nameSetToList (bind_fvs (unLoc . snd $ bind)),
+ = [ (bind, key, [key | n <- nameSetToList (bind_fvs (unLoc bind)),
Just key <- [lookupNameEnv key_map n], no_sig n ])
| (bind, key) <- keyd_binds
]
@@ -416,13 +455,13 @@ mkEdges sig_fn binds
keyd_binds = bagToList binds `zip` [0::BKey ..]
key_map :: NameEnv BKey -- Which binding it comes from
- key_map = mkNameEnv [(bndr, key) | ((_, L _ bind), key) <- keyd_binds
+ key_map = mkNameEnv [(bndr, key) | (L _ bind, key) <- keyd_binds
, bndr <- bindersOfHsBind bind ]
bindersOfHsBind :: HsBind Name -> [Name]
bindersOfHsBind (PatBind { pat_lhs = pat }) = collectPatBinders pat
bindersOfHsBind (FunBind { fun_id = L _ f }) = [f]
-bindersOfHsBind (PatSynBind { patsyn_id = L _ psyn }) = [psyn]
+bindersOfHsBind (PatSynBind PSB{ psb_id = L _ psyn }) = [psyn]
bindersOfHsBind (AbsBinds {}) = panic "bindersOfHsBind AbsBinds"
bindersOfHsBind (VarBind {}) = panic "bindersOfHsBind VarBind"
@@ -431,7 +470,7 @@ tcPolyBinds :: TopLevelFlag -> TcSigFun -> PragFun
-> RecFlag -- Whether the group is really recursive
-> RecFlag -- Whether it's recursive after breaking
-- dependencies based on type signatures
- -> [(Origin, LHsBind Name)]
+ -> [LHsBind Name]
-> TcM (LHsBinds TcId, [TcId], TopLevelFlag)
-- Typechecks a single bunch of bindings all together,
@@ -471,9 +510,8 @@ tcPolyBinds top_lvl sig_fn prag_fn rec_group rec_tc bind_list
; return result }
where
- bind_list' = map snd bind_list
- binder_names = collectHsBindListBinders bind_list'
- loc = foldr1 combineSrcSpans (map getLoc bind_list')
+ binder_names = collectHsBindListBinders bind_list
+ loc = foldr1 combineSrcSpans (map getLoc bind_list)
-- The mbinds have been dependency analysed and
-- may no longer be adjacent; so find the narrowest
-- span that includes them all
@@ -483,7 +521,7 @@ tcPolyNoGen -- No generalisation whatsoever
:: RecFlag -- Whether it's recursive after breaking
-- dependencies based on type signatures
-> PragFun -> TcSigFun
- -> [(Origin, LHsBind Name)]
+ -> [LHsBind Name]
-> TcM (LHsBinds TcId, [TcId], TopLevelFlag)
tcPolyNoGen rec_tc prag_fn tc_sig_fn bind_list
@@ -508,7 +546,7 @@ tcPolyNoGen rec_tc prag_fn tc_sig_fn bind_list
tcPolyCheck :: RecFlag -- Whether it's recursive after breaking
-- dependencies based on type signatures
-> PragFun -> TcSigInfo
- -> (Origin, LHsBind Name)
+ -> LHsBind Name
-> TcM (LHsBinds TcId, [TcId], TopLevelFlag)
-- There is just one binding,
-- it binds a single variable,
@@ -516,11 +554,11 @@ tcPolyCheck :: RecFlag -- Whether it's recursive after breaking
tcPolyCheck rec_tc prag_fn
sig@(TcSigInfo { sig_id = poly_id, sig_tvs = tvs_w_scoped
, sig_theta = theta, sig_tau = tau, sig_loc = loc })
- bind@(origin, _)
+ bind
= do { ev_vars <- newEvVars theta
; let skol_info = SigSkol (FunSigCtxt (idName poly_id)) (mkPhiTy theta tau)
prag_sigs = prag_fn (idName poly_id)
- ; tvs <- mapM (skolemiseSigTv . snd) tvs_w_scoped
+ tvs = map snd tvs_w_scoped
; (ev_binds, (binds', [mono_info]))
<- setSrcSpan loc $
checkConstraints skol_info tvs ev_vars $
@@ -541,7 +579,7 @@ tcPolyCheck rec_tc prag_fn
, abs_exports = [export], abs_binds = binds' }
closed | isEmptyVarSet (tyVarsOfType (idType poly_id)) = TopLevel
| otherwise = NotTopLevel
- ; return (unitBag (origin, abs_bind), [poly_id], closed) }
+ ; return (unitBag abs_bind, [poly_id], closed) }
------------------
tcPolyInfer
@@ -550,7 +588,7 @@ tcPolyInfer
-> PragFun -> TcSigFun
-> Bool -- True <=> apply the monomorphism restriction
-> Bool -- True <=> free vars have closed types
- -> [(Origin, LHsBind Name)]
+ -> [LHsBind Name]
-> TcM (LHsBinds TcId, [TcId], TopLevelFlag)
tcPolyInfer rec_tc prag_fn tc_sig_fn mono closed bind_list
= do { ((binds', mono_infos), wanted)
@@ -559,12 +597,11 @@ tcPolyInfer rec_tc prag_fn tc_sig_fn mono closed bind_list
; let name_taus = [(name, idType mono_id) | (name, _, mono_id) <- mono_infos]
; traceTc "simplifyInfer call" (ppr name_taus $$ ppr wanted)
- ; (qtvs, givens, mr_bites, ev_binds) <-
- simplifyInfer closed mono name_taus wanted
+ ; (qtvs, givens, mr_bites, ev_binds)
+ <- simplifyInfer closed mono name_taus wanted
- ; theta <- zonkTcThetaType (map evVarPred givens)
+ ; theta <- zonkTcThetaType (map evVarPred givens)
; exports <- checkNoErrs $ mapM (mkExport prag_fn qtvs theta) mono_infos
-
; loc <- getSrcSpanM
; let poly_ids = map abe_poly exports
final_closed | closed && not mr_bites = TopLevel
@@ -576,10 +613,8 @@ tcPolyInfer rec_tc prag_fn tc_sig_fn mono closed bind_list
; traceTc "Binding:" (ppr final_closed $$
ppr (poly_ids `zip` map idType poly_ids))
- ; return (unitBag (origin, abs_bind), poly_ids, final_closed) }
+ ; return (unitBag abs_bind, poly_ids, final_closed) }
-- poly_ids are guaranteed zonked by mkExport
- where
- origin = if all isGenerated (map fst bind_list) then Generated else FromSource
--------------
mkExport :: PragFun
@@ -601,20 +636,12 @@ mkExport :: PragFun
mkExport prag_fn qtvs theta (poly_name, mb_sig, mono_id)
= do { mono_ty <- zonkTcType (idType mono_id)
- ; let poly_id = case mb_sig of
- Nothing -> mkLocalId poly_name inferred_poly_ty
- Just sig -> sig_id sig
- -- poly_id has a zonked type
-
- -- In the inference case (no signature) this stuff figures out
- -- the right type variables and theta to quantify over
- -- See Note [Impedence matching]
- my_tvs2 = closeOverKinds (growThetaTyVars theta (tyVarsOfType mono_ty))
- -- Include kind variables! Trac #7916
- my_tvs = filter (`elemVarSet` my_tvs2) qtvs -- Maintain original order
- my_theta = filter (quantifyPred my_tvs2) theta
- inferred_poly_ty = mkSigmaTy my_tvs my_theta mono_ty
+ ; poly_id <- case mb_sig of
+ Just sig -> return (sig_id sig)
+ Nothing -> mkInferredPolyId poly_name qtvs theta mono_ty
+
+ -- NB: poly_id has a zonked type
; poly_id <- addInlinePrags poly_id prag_sigs
; spec_prags <- tcSpecPrags poly_id prag_sigs
-- tcPrags requires a zonked poly_id
@@ -630,7 +657,7 @@ mkExport prag_fn qtvs theta (poly_name, mb_sig, mono_id)
-- closed (unless we are doing NoMonoLocalBinds in which case all bets
-- are off)
-- See Note [Impedence matching]
- ; (wrap, wanted) <- addErrCtxtM (mk_msg poly_id) $
+ ; (wrap, wanted) <- addErrCtxtM (mk_bind_msg inferred True poly_name (idType poly_id)) $
captureConstraints $
tcSubType origin sig_ctxt sel_poly_ty (idType poly_id)
; ev_binds <- simplifyTop wanted
@@ -641,24 +668,58 @@ mkExport prag_fn qtvs theta (poly_name, mb_sig, mono_id)
, abe_prags = SpecPrags spec_prags }) }
where
inferred = isNothing mb_sig
-
- mk_msg poly_id tidy_env
- = return (tidy_env', msg)
- where
- msg | inferred = hang (ptext (sLit "When checking that") <+> pp_name)
- 2 (ptext (sLit "has the inferred type") <+> pp_ty)
- $$ ptext (sLit "Probable cause: the inferred type is ambiguous")
- | otherwise = hang (ptext (sLit "When checking that") <+> pp_name)
- 2 (ptext (sLit "has the specified type") <+> pp_ty)
- pp_name = quotes (ppr poly_name)
- pp_ty = quotes (ppr tidy_ty)
- (tidy_env', tidy_ty) = tidyOpenType tidy_env (idType poly_id)
-
prag_sigs = prag_fn poly_name
origin = AmbigOrigin sig_ctxt
sig_ctxt = InfSigCtxt poly_name
+
+mkInferredPolyId :: Name -> [TyVar] -> TcThetaType -> TcType -> TcM Id
+-- In the inference case (no signature) this stuff figures out
+-- the right type variables and theta to quantify over
+-- See Note [Validity of inferred types]
+mkInferredPolyId poly_name qtvs theta mono_ty
+ = addErrCtxtM (mk_bind_msg True False poly_name inferred_poly_ty) $
+ do { checkValidType (InfSigCtxt poly_name) inferred_poly_ty
+ ; return (mkLocalId poly_name inferred_poly_ty) }
+ where
+ my_tvs2 = closeOverKinds (growThetaTyVars theta (tyVarsOfType mono_ty))
+ -- Include kind variables! Trac #7916
+ my_tvs = filter (`elemVarSet` my_tvs2) qtvs -- Maintain original order
+ my_theta = filter (quantifyPred my_tvs2) theta
+ inferred_poly_ty = mkSigmaTy my_tvs my_theta mono_ty
+
+mk_bind_msg :: Bool -> Bool -> Name -> TcType -> TidyEnv -> TcM (TidyEnv, SDoc)
+mk_bind_msg inferred want_ambig poly_name poly_ty tidy_env
+ = return (tidy_env', msg)
+ where
+ msg = vcat [ ptext (sLit "When checking that") <+> quotes (ppr poly_name)
+ <+> ptext (sLit "has the") <+> what <+> ptext (sLit "type")
+ , nest 2 (ppr poly_name <+> dcolon <+> ppr tidy_ty)
+ , ppWhen want_ambig $
+ ptext (sLit "Probable cause: the inferred type is ambiguous") ]
+ what | inferred = ptext (sLit "inferred")
+ | otherwise = ptext (sLit "specified")
+ (tidy_env', tidy_ty) = tidyOpenType tidy_env poly_ty
\end{code}
+Note [Validity of inferred types]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We need to check the inferred type for validity, in case it uses language
+extensions that are not turned on. The principle is that if the user
+simply adds the inferred type to the program source, it'll compile fine.
+See #8883.
+
+Examples that might fail:
+ - an inferred theta that requires type equalities e.g. (F a ~ G b)
+ or multi-parameter type classes
+ - an inferred type that includes unboxed tuples
+
+However we don't do the ambiguity check (checkValidType omits it for
+InfSigCtxt) because the impedence-matching stage, which follows
+immediately, will do it and we don't want two error messages.
+Moreover, because of the impedence matching stage, the ambiguity-check
+suggestion of -XAllowAmbiguousTypes will not work.
+
+
Note [Impedence matching]
~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
@@ -704,7 +765,7 @@ type PragFun = Name -> [LSig Name]
mkPragFun :: [LSig Name] -> LHsBinds Name -> PragFun
mkPragFun sigs binds = \n -> lookupNameEnv prag_env n `orElse` []
where
- prs = mapCatMaybes get_sig sigs
+ prs = mapMaybe get_sig sigs
get_sig :: LSig Name -> Maybe (Located Name, LSig Name)
get_sig (L l (SpecSig nm ty inl)) = Just (nm, L l $ SpecSig nm ty (add_arity nm inl))
@@ -723,7 +784,7 @@ mkPragFun sigs binds = \n -> lookupNameEnv prag_env n `orElse` []
-- ar_env maps a local to the arity of its definition
ar_env :: NameEnv Arity
- ar_env = foldrBag (lhsBindArity . snd) emptyNameEnv binds
+ ar_env = foldrBag lhsBindArity emptyNameEnv binds
lhsBindArity :: LHsBind Name -> NameEnv Arity -> NameEnv Arity
lhsBindArity (L _ (FunBind { fun_id = id, fun_matches = ms })) env
@@ -739,7 +800,8 @@ tcSpecPrags :: Id -> [LSig Name]
-- Pre-condition: the poly_id is zonked
-- Reason: required by tcSubExp
tcSpecPrags poly_id prag_sigs
- = do { unless (null bad_sigs) warn_discarded_sigs
+ = do { traceTc "tcSpecPrags" (ppr poly_id <+> ppr spec_sigs)
+ ; unless (null bad_sigs) warn_discarded_sigs
; mapAndRecoverM (wrapLocM (tcSpec poly_id)) spec_sigs }
where
spec_sigs = filter isSpecLSig prag_sigs
@@ -777,7 +839,7 @@ tcSpec _ prag = pprPanic "tcSpec" (ppr prag)
--------------
tcImpPrags :: [LSig Name] -> TcM [LTcSpecPrag]
--- SPECIALISE pragamas for imported things
+-- SPECIALISE pragmas for imported things
tcImpPrags prags
= do { this_mod <- getModule
; dflags <- getDynFlags
@@ -992,12 +1054,12 @@ tcMonoBinds :: RecFlag -- Whether the binding is recursive for typechecking pur
-- i.e. the binders are mentioned in their RHSs, and
-- we are not rescued by a type signature
-> TcSigFun -> LetBndrSpec
- -> [(Origin, LHsBind Name)]
+ -> [LHsBind Name]
-> TcM (LHsBinds TcId, [MonoBindInfo])
tcMonoBinds is_rec sig_fn no_gen
- [ (origin, L b_loc (FunBind { fun_id = L nm_loc name, fun_infix = inf,
- fun_matches = matches, bind_fvs = fvs }))]
+ [ L b_loc (FunBind { fun_id = L nm_loc name, fun_infix = inf,
+ fun_matches = matches, bind_fvs = fvs })]
-- Single function binding,
| NonRecursive <- is_rec -- ...binder isn't mentioned in RHS
, Nothing <- sig_fn name -- ...with no type signature
@@ -1015,17 +1077,16 @@ tcMonoBinds is_rec sig_fn no_gen
-- type of the thing whose rhs we are type checking
tcMatchesFun name inf matches rhs_ty
- ; return (unitBag (origin,
- L b_loc (FunBind { fun_id = L nm_loc mono_id, fun_infix = inf,
- fun_matches = matches', bind_fvs = fvs,
- fun_co_fn = co_fn, fun_tick = Nothing })),
+ ; return (unitBag $ L b_loc (FunBind { fun_id = L nm_loc mono_id, fun_infix = inf,
+ fun_matches = matches', bind_fvs = fvs,
+ fun_co_fn = co_fn, fun_tick = Nothing }),
[(name, Nothing, mono_id)]) }
tcMonoBinds _ sig_fn no_gen binds
- = do { tc_binds <- mapM (wrapOriginLocM (tcLhs sig_fn no_gen)) binds
+ = do { tc_binds <- mapM (wrapLocM (tcLhs sig_fn no_gen)) binds
-- Bring the monomorphic Ids, into scope for the RHSs
- ; let mono_info = getMonoBindInfo (map snd tc_binds)
+ ; let mono_info = getMonoBindInfo tc_binds
rhs_id_env = [(name,mono_id) | (name, Nothing, mono_id) <- mono_info]
-- A monomorphic binding for each term variable that lacks
-- a type sig. (Ones with a sig are already in scope.)
@@ -1033,7 +1094,7 @@ tcMonoBinds _ sig_fn no_gen binds
; traceTc "tcMonoBinds" $ vcat [ ppr n <+> ppr id <+> ppr (idType id)
| (n,id) <- rhs_id_env]
; binds' <- tcExtendIdEnv2 rhs_id_env $
- mapM (wrapOriginLocM tcRhs) tc_binds
+ mapM (wrapLocM tcRhs) tc_binds
; return (listToBag binds', mono_info) }
------------------------
@@ -1162,18 +1223,6 @@ However, we do *not* support this
f :: forall a. a->a
(f,g) = e
-Note [More instantiated than scoped]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-There may be more instantiated type variables than lexically-scoped
-ones. For example:
- type T a = forall b. b -> (a,b)
- f :: forall c. T c
-Here, the signature for f will have one scoped type variable, c,
-but two instantiated type variables, c' and b'.
-
-We assume that the scoped ones are at the *front* of sig_tvs,
-and remember the names from the original HsForAllTy in the TcSigFun.
-
Note [Signature skolems]
~~~~~~~~~~~~~~~~~~~~~~~~
When instantiating a type signature, we do so with either skolems or
@@ -1248,45 +1297,28 @@ tcTySig _ = return []
instTcTySigFromId :: SrcSpan -> Id -> TcM TcSigInfo
instTcTySigFromId loc id
- = do { (tvs, theta, tau) <- tcInstType inst_sig_tyvars (idType id)
+ = do { (tvs, theta, tau) <- tcInstType (tcInstSigTyVarsLoc loc)
+ (idType id)
; return (TcSigInfo { sig_id = id, sig_loc = loc
, sig_tvs = [(Nothing, tv) | tv <- tvs]
, sig_theta = theta, sig_tau = tau }) }
where
-- Hack: in an instance decl we use the selector id as
- -- the template; but we do *not* want the SrcSpan on the Name of
+ -- the template; but we do *not* want the SrcSpan on the Name of
-- those type variables to refer to the class decl, rather to
- -- the instance decl
- inst_sig_tyvars tvs = tcInstSigTyVars (map set_loc tvs)
- set_loc tv = setTyVarName tv (mkInternalName (nameUnique n) (nameOccName n) loc)
- where
- n = tyVarName tv
+ -- the instance decl
instTcTySig :: LHsType Name -> TcType -- HsType and corresponding TcType
-> Name -> TcM TcSigInfo
instTcTySig hs_ty@(L loc _) sigma_ty name
= do { (inst_tvs, theta, tau) <- tcInstType tcInstSigTyVars sigma_ty
- ; return (TcSigInfo { sig_id = poly_id, sig_loc = loc
- , sig_tvs = zipEqual "instTcTySig" scoped_tvs inst_tvs
+ ; return (TcSigInfo { sig_id = mkLocalId name sigma_ty
+ , sig_loc = loc
+ , sig_tvs = findScopedTyVars hs_ty sigma_ty inst_tvs
, sig_theta = theta, sig_tau = tau }) }
- where
- poly_id = mkLocalId name sigma_ty
-
- scoped_names = hsExplicitTvs hs_ty
- (sig_tvs,_) = tcSplitForAllTys sigma_ty
-
- scoped_tvs :: [Maybe Name]
- scoped_tvs = mk_scoped scoped_names sig_tvs
-
- mk_scoped :: [Name] -> [TyVar] -> [Maybe Name]
- mk_scoped [] tvs = [Nothing | _ <- tvs]
- mk_scoped (n:ns) (tv:tvs)
- | n == tyVarName tv = Just n : mk_scoped ns tvs
- | otherwise = Nothing : mk_scoped (n:ns) tvs
- mk_scoped (n:ns) [] = pprPanic "mk_scoped" (ppr name $$ ppr (n:ns) $$ ppr hs_ty $$ ppr sigma_ty)
-------------------------------
-data GeneralisationPlan
+data GeneralisationPlan
= NoGen -- No generalisation, no AbsBinds
| InferGen -- Implicit generalisation; there is an AbsBinds
@@ -1294,7 +1326,7 @@ data GeneralisationPlan
Bool -- True <=> bindings mention only variables with closed types
-- See Note [Bindings with closed types] in TcRnTypes
- | CheckGen (Origin, LHsBind Name) TcSigInfo
+ | CheckGen (LHsBind Name) TcSigInfo
-- One binding with a signature
-- Explicit generalisation; there is an AbsBinds
@@ -1306,25 +1338,25 @@ instance Outputable GeneralisationPlan where
ppr (InferGen b c) = ptext (sLit "InferGen") <+> ppr b <+> ppr c
ppr (CheckGen _ s) = ptext (sLit "CheckGen") <+> ppr s
-decideGeneralisationPlan
+decideGeneralisationPlan
:: DynFlags -> TcTypeEnv -> [Name]
- -> [(Origin, LHsBind Name)] -> TcSigFun -> GeneralisationPlan
+ -> [LHsBind Name] -> TcSigFun -> GeneralisationPlan
decideGeneralisationPlan dflags type_env bndr_names lbinds sig_fn
- | bang_pat_binds = NoGen
+ | strict_pat_binds = NoGen
| Just (lbind, sig) <- one_funbind_with_sig lbinds = CheckGen lbind sig
- | mono_local_binds = NoGen
- | otherwise = InferGen mono_restriction closed_flag
+ | mono_local_binds = NoGen
+ | otherwise = InferGen mono_restriction closed_flag
where
bndr_set = mkNameSet bndr_names
- binds = map (unLoc . snd) lbinds
+ binds = map unLoc lbinds
- bang_pat_binds = any isBangHsBind binds
- -- Bang patterns must not be polymorphic,
- -- because we are going to force them
- -- See Trac #4498
+ strict_pat_binds = any isStrictHsBind binds
+ -- Strict patterns (top level bang or unboxed tuple) must not
+ -- be polymorphic, because we are going to force them
+ -- See Trac #4498, #8762
- mono_restriction = xopt Opt_MonomorphismRestriction dflags
+ mono_restriction = xopt Opt_MonomorphismRestriction dflags
&& any restricted binds
is_closed_ns :: NameSet -> Bool -> Bool
@@ -1333,7 +1365,7 @@ decideGeneralisationPlan dflags type_env bndr_names lbinds sig_fn
is_closed_id :: Name -> Bool
-- See Note [Bindings with closed types] in TcRnTypes
- is_closed_id name
+ is_closed_id name
| name `elemNameSet` bndr_set
= True -- Ignore binders in this groups, of course
| Just thing <- lookupNameEnv type_env name
@@ -1346,19 +1378,19 @@ decideGeneralisationPlan dflags type_env bndr_names lbinds sig_fn
= WARN( isInternalName name, ppr name ) True
-- The free-var set for a top level binding mentions
-- imported things too, so that we can report unused imports
- -- These won't be in the local type env.
+ -- These won't be in the local type env.
-- Ditto class method etc from the current module
-
+
closed_flag = foldr (is_closed_ns . bind_fvs) True binds
- mono_local_binds = xopt Opt_MonoLocalBinds dflags
+ mono_local_binds = xopt Opt_MonoLocalBinds dflags
&& not closed_flag
no_sig n = isNothing (sig_fn n)
-- With OutsideIn, all nested bindings are monomorphic
-- except a single function binding with a signature
- one_funbind_with_sig [lbind@(_, L _ (FunBind { fun_id = v }))]
+ one_funbind_with_sig [lbind@(L _ (FunBind { fun_id = v }))]
= case sig_fn (unLoc v) of
Nothing -> Nothing
Just sig -> Just (lbind, sig)
@@ -1380,78 +1412,77 @@ decideGeneralisationPlan dflags type_env bndr_names lbinds sig_fn
-------------------
checkStrictBinds :: TopLevelFlag -> RecFlag
- -> [(Origin, LHsBind Name)]
+ -> [LHsBind Name]
-> LHsBinds TcId -> [Id]
-> TcM ()
-- Check that non-overloaded unlifted bindings are
-- a) non-recursive,
--- b) not top level,
+-- b) not top level,
-- c) not a multiple-binding group (more or less implied by (a))
checkStrictBinds top_lvl rec_group orig_binds tc_binds poly_ids
- | unlifted || bang_pat
+ | unlifted_bndrs || any_strict_pat -- This binding group must be matched strictly
= do { checkTc (isNotTopLevel top_lvl)
- (strictBindErr "Top-level" unlifted orig_binds)
+ (strictBindErr "Top-level" unlifted_bndrs orig_binds)
; checkTc (isNonRec rec_group)
- (strictBindErr "Recursive" unlifted orig_binds)
+ (strictBindErr "Recursive" unlifted_bndrs orig_binds)
; checkTc (all is_monomorphic (bagToList tc_binds))
(polyBindErr orig_binds)
-- data Ptr a = Ptr Addr#
-- f x = let p@(Ptr y) = ... in ...
- -- Here the binding for 'p' is polymorphic, but does
+ -- Here the binding for 'p' is polymorphic, but does
-- not mix with an unlifted binding for 'y'. You should
-- use a bang pattern. Trac #6078.
-
+
; checkTc (isSingleton orig_binds)
- (strictBindErr "Multiple" unlifted orig_binds)
-
- -- Ensure that unlifted bindings which look lazy, like:
- -- f x = let I# y = x
- -- cause an error.
- ; when lifted_pat $
- checkTc bang_pat
- -- No outer bang, but it's a compound pattern
- -- E.g (I# x#) = blah
- -- Warn about this, but not about
- -- x# = 4# +# 1#
- -- (# a, b #) = ...
- (unliftedMustBeBang orig_binds) }
+ (strictBindErr "Multiple" unlifted_bndrs orig_binds)
+
+ -- Complain about a binding that looks lazy
+ -- e.g. let I# y = x in ...
+ -- Remember, in checkStrictBinds we are going to do strict
+ -- matching, so (for software engineering reasons) we insist
+ -- that the strictness is manifest on each binding
+ -- However, lone (unboxed) variables are ok
+ ; checkTc (not any_pat_looks_lazy)
+ (unliftedMustBeBang orig_binds) }
| otherwise
= traceTc "csb2" (ppr poly_ids) >>
return ()
where
- unlifted = any is_unlifted poly_ids
- bang_pat = any (isBangHsBind . unLoc . snd) orig_binds
- lifted_pat = any (isLiftedPatBind . unLoc . snd) orig_binds
+ unlifted_bndrs = any is_unlifted poly_ids
+ any_strict_pat = any (isStrictHsBind . unLoc) orig_binds
+ any_pat_looks_lazy = any (looksLazyPatBind . unLoc) orig_binds
is_unlifted id = case tcSplitForAllTys (idType id) of
(_, rho) -> isUnLiftedType rho
- is_monomorphic (_, (L _ (AbsBinds { abs_tvs = tvs, abs_ev_vars = evs })))
+ is_monomorphic (L _ (AbsBinds { abs_tvs = tvs, abs_ev_vars = evs }))
= null tvs && null evs
is_monomorphic _ = True
-unliftedMustBeBang :: [(Origin, LHsBind Name)] -> SDoc
+unliftedMustBeBang :: [LHsBind Name] -> SDoc
unliftedMustBeBang binds
= hang (text "Pattern bindings containing unlifted types should use an outermost bang pattern:")
- 2 (vcat (map (ppr . snd) binds))
+ 2 (vcat (map ppr binds))
-polyBindErr :: [(Origin, LHsBind Name)] -> SDoc
+polyBindErr :: [LHsBind Name] -> SDoc
polyBindErr binds
= hang (ptext (sLit "You can't mix polymorphic and unlifted bindings"))
- 2 (vcat [vcat (map (ppr . snd) binds),
+ 2 (vcat [vcat (map ppr binds),
ptext (sLit "Probable fix: use a bang pattern")])
-strictBindErr :: String -> Bool -> [(Origin, LHsBind Name)] -> SDoc
-strictBindErr flavour unlifted binds
+strictBindErr :: String -> Bool -> [LHsBind Name] -> SDoc
+strictBindErr flavour unlifted_bndrs binds
= hang (text flavour <+> msg <+> ptext (sLit "aren't allowed:"))
- 2 (vcat (map (ppr . snd) binds))
+ 2 (vcat (map ppr binds))
where
- msg | unlifted = ptext (sLit "bindings for unlifted types")
- | otherwise = ptext (sLit "bang-pattern bindings")
+ msg | unlifted_bndrs = ptext (sLit "bindings for unlifted types")
+ | otherwise = ptext (sLit "bang-pattern or unboxed-tuple bindings")
\end{code}
+Note [Binding scoped type variables]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
%************************************************************************
%* *
diff --git a/compiler/typecheck/TcCanonical.lhs b/compiler/typecheck/TcCanonical.lhs
index 823b37fa1a58..d58d5db40fd0 100644
--- a/compiler/typecheck/TcCanonical.lhs
+++ b/compiler/typecheck/TcCanonical.lhs
@@ -1,4 +1,6 @@
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcCanonical(
canonicalize, emitWorkNC,
StopOrContinue (..)
@@ -385,22 +387,9 @@ canIrred old_ev
; case classifyPredType (ctEvPred new_ev) of
ClassPred cls tys -> canClassNC new_ev cls tys
TuplePred tys -> canTuple new_ev tys
- EqPred ty1 ty2
- | something_changed old_ty ty1 ty2 -> canEqNC new_ev ty1 ty2
- _ -> continueWith $
- CIrredEvCan { cc_ev = new_ev } } } }
- where
- -- If the constraint was a kind-mis-matched equality, we must
- -- retry canEqNC only if something has changed, otherwise we
- -- get an infinite loop
- something_changed old_ty new_ty1 new_ty2
- | EqPred old_ty1 old_ty2 <- classifyPredType old_ty
- = not ( new_ty1 `tcEqType` old_ty1
- && typeKind new_ty1 `tcEqKind` typeKind old_ty1
- && new_ty2 `tcEqType` old_ty2
- && typeKind new_ty2 `tcEqKind` typeKind old_ty2)
- | otherwise
- = True
+ EqPred ty1 ty2 -> canEqNC new_ev ty1 ty2
+ _ -> continueWith $
+ CIrredEvCan { cc_ev = new_ev } } } }
canHole :: CtEvidence -> OccName -> TcS StopOrContinue
canHole ev occ
@@ -488,14 +477,6 @@ flatten :: FlattenMode
--
-- Postcondition: Coercion :: Xi ~ TcType
-flatten f ctxt ty
- | Just ty' <- tcView ty
- = do { (xi, co) <- flatten f ctxt ty'
- ; if xi `tcEqType` ty' then return (ty,co)
- else return (xi,co) }
- -- Small tweak for better error messages
- -- by preserving type synonyms where possible
-
flatten _ _ xi@(LitTy {}) = return (xi, mkTcNomReflCo xi)
flatten f ctxt (TyVarTy tv)
@@ -513,11 +494,21 @@ flatten f ctxt (FunTy ty1 ty2)
; return (mkFunTy xi1 xi2, mkTcFunCo Nominal co1 co2) }
flatten f ctxt (TyConApp tc tys)
- -- For a normal type constructor or data family application,
+
+ -- Expand type synonyms that mention type families
+ -- on the RHS; see Note [Flattening synonyms]
+ | Just (tenv, rhs, tys') <- tcExpandTyCon_maybe tc tys
+ , any isSynFamilyTyCon (tyConsOfType rhs)
+ = flatten f ctxt (mkAppTys (substTy (mkTopTvSubst tenv) rhs) tys')
+
+ -- For * a normal data type application
+ -- * data family application
+ -- * type synonym application whose RHS does not mention type families
+ -- See Note [Flattening synonyms]
-- we just recursively flatten the arguments.
| not (isSynFamilyTyCon tc)
- = do { (xis,cos) <- flattenMany f ctxt tys
- ; return (mkTyConApp tc xis, mkTcTyConAppCo Nominal tc cos) }
+ = do { (xis,cos) <- flattenMany f ctxt tys
+ ; return (mkTyConApp tc xis, mkTcTyConAppCo Nominal tc cos) }
-- Otherwise, it's a type function application, and we have to
-- flatten it away as well, and generate a new given equality constraint
@@ -551,10 +542,28 @@ flatten _f ctxt ty@(ForAllTy {})
; return (mkForAllTys tvs rho', foldr mkTcForAllCo co tvs) }
\end{code}
+Note [Flattening synonyms]
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+Not expanding synonyms aggressively improves error messages, and
+keeps types smaller. But we need to take care.
+
+Suppose
+ type T a = a -> a
+and we want to flatten the type (T (F a)). Then we can safely flatten
+the (F a) to a skolem, and return (T fsk). We don't need to expand the
+synonym. This works because TcTyConAppCo can deal with synonyms
+(unlike TyConAppCo), see Note [TcCoercions] in TcEvidence.
+
+But (Trac #8979) for
+ type T a = (F a, a) where F is a type function
+we must expand the synonym in (say) T Int, to expose the type function
+to the flattener.
+
+
Note [Flattening under a forall]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Under a forall, we
- (a) MUST apply the inert subsitution
+ (a) MUST apply the inert substitution
(b) MUST NOT flatten type family applications
Hence FMSubstOnly.
@@ -624,12 +633,14 @@ flattenTyVar f ctxt tv
= do { mb_yes <- flattenTyVarOuter f ctxt tv
; case mb_yes of
Left tv' -> -- Done
- return (ty, mkTcNomReflCo ty)
+ do { traceTcS "flattenTyVar1" (ppr tv $$ ppr (tyVarKind tv'))
+ ; return (ty', mkTcNomReflCo ty') }
where
- ty = mkTyVarTy tv'
+ ty' = mkTyVarTy tv'
Right (ty1, co1) -> -- Recurse
do { (ty2, co2) <- flatten f ctxt ty1
+ ; traceTcS "flattenTyVar2" (ppr tv $$ ppr ty2)
; return (ty2, co2 `mkTcTransCo` co1) }
}
@@ -732,7 +743,8 @@ canEvVarsCreated (ev : evs)
emitWorkNC :: [CtEvidence] -> TcS ()
emitWorkNC evs
| null evs = return ()
- | otherwise = updWorkListTcS (extendWorkListCts (map mk_nc evs))
+ | otherwise = do { traceTcS "Emitting fresh work" (vcat (map ppr evs))
+ ; updWorkListTcS (extendWorkListCts (map mk_nc evs)) }
where
mk_nc ev = mkNonCanonical ev
@@ -889,7 +901,8 @@ canDecomposableTyConApp ev tc1 tys1 tc2 tys2
-- Fail straight away for better error messages
= canEqFailure ev (mkTyConApp tc1 tys1) (mkTyConApp tc2 tys2)
| otherwise
- = canDecomposableTyConAppOK ev tc1 tys1 tys2
+ = do { traceTcS "canDecomposableTyConApp" (ppr ev $$ ppr tc1 $$ ppr tys1 $$ ppr tys2)
+ ; canDecomposableTyConAppOK ev tc1 tys1 tys2 }
canDecomposableTyConAppOK :: CtEvidence
-> TyCon -> [TcType] -> [TcType]
@@ -1143,7 +1156,7 @@ canEqTyVar2 :: DynFlags
-> TcS StopOrContinue
-- LHS is an inert type variable,
-- and RHS is fully rewritten, but with type synonyms
--- preserved as must as possible
+-- preserved as much as possible
canEqTyVar2 dflags ev swapped tv1 xi2 co2
| Just tv2 <- getTyVar_maybe xi2
@@ -1173,6 +1186,9 @@ canEqTyVar2 dflags ev swapped tv1 xi2 co2
; case mb of
Nothing -> return ()
Just new_ev -> emitInsoluble (mkNonCanonical new_ev)
+ -- If we have a ~ [a], it is not canonical, and in particular
+ -- we don't want to rewrite existing inerts with it, otherwise
+ -- we'd risk divergence in the constraint solver
; return Stop }
where
xi1 = mkTyVarTy tv1
@@ -1214,7 +1230,7 @@ canEqTyVarTyVar ev swapped tv1 tv2 co2
-> continueWith (CTyEqCan { cc_ev = new_ev
, cc_tyvar = tv1, cc_rhs = xi2 })
| otherwise
- -> checkKind ev xi1 k1 xi2 k2 }
+ -> checkKind new_ev xi1 k1 xi2 k2 }
where
reorient_me
| k1 `tcEqKind` k2 = tv2 `better_than` tv1
@@ -1241,20 +1257,21 @@ checkKind :: CtEvidence -- t1~t2
-- for the type equality; and continue with the kind equality constraint.
-- When the latter is solved, it'll kick out the irreducible equality for
-- a second attempt at solving
+--
+-- See Note [Equalities with incompatible kinds]
checkKind new_ev s1 k1 s2 k2 -- See Note [Equalities with incompatible kinds]
= ASSERT( isKind k1 && isKind k2 )
do { traceTcS "canEqLeaf: incompatible kinds" (vcat [ppr k1, ppr k2])
- -- Put the not-currently-soluble thing back onto the work list
- ; updWorkListTcS $ extendWorkListNonEq $
- CIrredEvCan { cc_ev = new_ev }
-
-- Create a derived kind-equality, and solve it
- ; mw <- newDerived kind_co_loc (mkEqPred k1 k2)
+ ; mw <- newDerived kind_co_loc (mkTcEqPred k1 k2)
; case mw of
- Nothing -> return Stop
- Just kev -> canEqNC kev k1 k2 }
+ Nothing -> return ()
+ Just kev -> emitWorkNC [kev]
+
+ -- Put the not-currently-soluble thing into the inert set
+ ; continueWith (CIrredEvCan { cc_ev = new_ev }) }
where
loc = ctev_loc new_ev
kind_co_loc = setCtLocOrigin loc (KindEqOrigin s1 s2 (ctLocOrigin loc))
@@ -1294,8 +1311,8 @@ a well-kinded type ill-kinded; and that is bad (eg typeKind can crash, see
Trac #7696).
So instead for these ill-kinded equalities we generate a CIrredCan,
-which keeps it out of the way until a subsequent substitution (on kind
-variables, say) re-activates it.
+and put it in the inert set, which keeps it out of the way until a
+subsequent substitution (on kind variables, say) re-activates it.
NB: it is important that the types s1,s2 are flattened and zonked
so that their kinds k1, k2 are inert wrt the substitution. That
@@ -1304,6 +1321,11 @@ NB: it is important that the types s1,s2 are flattened and zonked
E.g. it is WRONG to make an irred (a:k1)~(b:k2)
if we already have a substitution k1:=k2
+NB: it's important that the new CIrredCan goes in the inert set rather
+than back into the work list. We used to do the latter, but that led
+to an infinite loop when we encountered it again, and put it back in
+the work list again.
+
See also Note [Kind orientation for CTyEqCan] and
Note [Kind orientation for CFunEqCan] in TcRnTypes
diff --git a/compiler/typecheck/TcClassDcl.lhs b/compiler/typecheck/TcClassDcl.lhs
index f61f48e92aa6..be5a74f294ef 100644
--- a/compiler/typecheck/TcClassDcl.lhs
+++ b/compiler/typecheck/TcClassDcl.lhs
@@ -6,7 +6,8 @@
Typechecking class declarations
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -121,7 +122,7 @@ tcClassSigs clas sigs def_methods
vanilla_sigs = [L loc (nm,ty) | L loc (TypeSig nm ty) <- sigs]
gen_sigs = [L loc (nm,ty) | L loc (GenericSig nm ty) <- sigs]
dm_bind_names :: [Name] -- These ones have a value binding in the class decl
- dm_bind_names = [op | (_, L _ (FunBind {fun_id = L _ op})) <- bagToList def_methods]
+ dm_bind_names = [op | L _ (FunBind {fun_id = L _ op}) <- bagToList def_methods]
tc_sig genop_env (op_names, op_hs_ty)
= do { traceTc "ClsSig 1" (ppr op_names)
@@ -238,18 +239,18 @@ tcDefMeth clas tyvars this_dict binds_in hs_sig_fn prag_fn (sel_id, dm_info)
---------------
tcInstanceMethodBody :: SkolemInfo -> [TcTyVar] -> [EvVar]
-> Id -> TcSigInfo
- -> TcSpecPrags -> (Origin, LHsBind Name)
- -> TcM (Origin, LHsBind Id)
+ -> TcSpecPrags -> LHsBind Name
+ -> TcM (LHsBind Id)
tcInstanceMethodBody skol_info tyvars dfun_ev_vars
meth_id local_meth_sig
- specs (origin, (L loc bind))
+ specs (L loc bind)
= do { let local_meth_id = sig_id local_meth_sig
lm_bind = L loc (bind { fun_id = L loc (idName local_meth_id) })
-- Substitute the local_meth_name for the binder
-- NB: the binding is always a FunBind
; (ev_binds, (tc_bind, _, _))
<- checkConstraints skol_info tyvars dfun_ev_vars $
- tcPolyCheck NonRecursive no_prag_fn local_meth_sig (origin, lm_bind)
+ tcPolyCheck NonRecursive no_prag_fn local_meth_sig lm_bind
; let export = ABE { abe_wrap = idHsWrapper, abe_poly = meth_id
, abe_mono = local_meth_id, abe_prags = specs }
@@ -258,7 +259,7 @@ tcInstanceMethodBody skol_info tyvars dfun_ev_vars
, abs_ev_binds = ev_binds
, abs_binds = tc_bind }
- ; return (origin, L loc full_bind) }
+ ; return (L loc full_bind) }
where
no_prag_fn _ = [] -- No pragmas for local_meth_id;
-- they are all for meth_id
@@ -326,14 +327,14 @@ lookupHsSig = lookupNameEnv
---------------------------
findMethodBind :: Name -- Selector name
-> LHsBinds Name -- A group of bindings
- -> Maybe ((Origin, LHsBind Name), SrcSpan)
+ -> Maybe (LHsBind Name, SrcSpan)
-- Returns the binding, and the binding
-- site of the method binder
findMethodBind sel_name binds
= foldlBag mplus Nothing (mapBag f binds)
- where
- f bind@(_, L _ (FunBind { fun_id = L bndr_loc op_name }))
- | op_name == sel_name
+ where
+ f bind@(L _ (FunBind { fun_id = L bndr_loc op_name }))
+ | op_name == sel_name
= Just (bind, bndr_loc)
f _other = Nothing
@@ -341,8 +342,9 @@ findMethodBind sel_name binds
findMinimalDef :: [LSig Name] -> Maybe ClassMinimalDef
findMinimalDef = firstJusts . map toMinimalDef
where
+ toMinimalDef :: LSig Name -> Maybe ClassMinimalDef
toMinimalDef (L _ (MinimalSig bf)) = Just (fmap unLoc bf)
- toMinimalDef _ = Nothing
+ toMinimalDef _ = Nothing
\end{code}
Note [Polymorphic methods]
diff --git a/compiler/typecheck/TcDefaults.lhs b/compiler/typecheck/TcDefaults.lhs
index d05a9485f826..7b5bd27321e4 100644
--- a/compiler/typecheck/TcDefaults.lhs
+++ b/compiler/typecheck/TcDefaults.lhs
@@ -5,7 +5,7 @@
\section[TcDefaults]{Typechecking \tr{default} declarations}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -39,7 +39,7 @@ tcDefaults :: [LDefaultDecl Name]
tcDefaults []
= getDeclaredDefaultTys -- No default declaration, so get the
-- default types from the envt;
- -- i.e. use the curent ones
+ -- i.e. use the current ones
-- (the caller will put them back there)
-- It's important not to return defaultDefaultTys here (which
-- we used to do) because in a TH program, tcDefaults [] is called
diff --git a/compiler/typecheck/TcDeriv.lhs b/compiler/typecheck/TcDeriv.lhs
index db79061e2f19..6812ac738756 100644
--- a/compiler/typecheck/TcDeriv.lhs
+++ b/compiler/typecheck/TcDeriv.lhs
@@ -6,6 +6,8 @@
Handles @deriving@ clauses on @data@ declarations.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcDeriv ( tcDeriving ) where
#include "HsVersions.h"
@@ -18,7 +20,7 @@ import FamInst
import TcErrors( reportAllUnsolved )
import TcValidity( validDerivPred )
import TcEnv
-import TcTyClsDecls( tcFamTyPats, tcAddDataFamInstCtxt )
+import TcTyClsDecls( tcFamTyPats, famTyConShape, tcAddDataFamInstCtxt, kcDataDefn )
import TcClassDcl( tcAddDeclCtxt ) -- Small helper
import TcGenDeriv -- Deriv stuff
import TcGenGenerics
@@ -36,7 +38,7 @@ import RnSource ( addTcgDUs )
import HscTypes
import Avail
-import Unify( tcMatchTy )
+import Unify( tcUnifyTy )
import Id( idType )
import Class
import Type
@@ -60,7 +62,6 @@ import Outputable
import FastString
import Bag
import Pair
-import BasicTypes (Origin(..))
import Control.Monad
import Data.List
@@ -85,13 +86,14 @@ Overall plan
\begin{code}
-- DerivSpec is purely local to this module
data DerivSpec theta = DS { ds_loc :: SrcSpan
- , ds_name :: Name
+ , ds_name :: Name -- DFun name
, ds_tvs :: [TyVar]
, ds_theta :: theta
, ds_cls :: Class
, ds_tys :: [Type]
, ds_tc :: TyCon
, ds_tc_args :: [Type]
+ , ds_overlap :: Maybe OverlapMode
, ds_newtype :: Bool }
-- This spec implies a dfun declaration of the form
-- df :: forall tvs. theta => C tys
@@ -105,7 +107,7 @@ data DerivSpec theta = DS { ds_loc :: SrcSpan
-- the theta is either the given and final theta, in standalone deriving,
-- or the not-yet-simplified list of constraints together with their origin
- -- ds_newtype = True <=> Newtype deriving
+ -- ds_newtype = True <=> Generalised Newtype Deriving (GND)
-- False <=> Vanilla deriving
\end{code}
@@ -158,6 +160,10 @@ earlyDSLoc :: EarlyDerivSpec -> SrcSpan
earlyDSLoc (InferTheta spec) = ds_loc spec
earlyDSLoc (GivenTheta spec) = ds_loc spec
+earlyDSClass :: EarlyDerivSpec -> Class
+earlyDSClass (InferTheta spec) = ds_cls spec
+earlyDSClass (GivenTheta spec) = ds_cls spec
+
splitEarlyDerivSpec :: [EarlyDerivSpec] -> ([DerivSpec ThetaOrigin], [DerivSpec ThetaType])
splitEarlyDerivSpec [] = ([],[])
splitEarlyDerivSpec (InferTheta spec : specs) =
@@ -437,7 +443,7 @@ commonAuxiliaries = foldM snoc ([], emptyBag) where
renameDeriv :: Bool
-> [InstInfo RdrName]
- -> Bag ((Origin, LHsBind RdrName), LSig RdrName)
+ -> Bag (LHsBind RdrName, LSig RdrName)
-> TcM (Bag (InstInfo Name), HsValBinds Name, DefUses)
renameDeriv is_boot inst_infos bagBinds
| is_boot -- If we are compiling a hs-boot file, don't generate any derived bindings
@@ -469,11 +475,13 @@ renameDeriv is_boot inst_infos bagBinds
where
rn_inst_info :: InstInfo RdrName -> TcM (InstInfo Name, FreeVars)
- rn_inst_info inst_info@(InstInfo { iSpec = inst
- , iBinds = InstBindings
- { ib_binds = binds
- , ib_pragmas = sigs
- , ib_standalone_deriving = sa } })
+ rn_inst_info
+ inst_info@(InstInfo { iSpec = inst
+ , iBinds = InstBindings
+ { ib_binds = binds
+ , ib_pragmas = sigs
+ , ib_extensions = exts -- only for type-checking
+ , ib_standalone_deriving = sa } })
= -- Bring the right type variables into
-- scope (yuk), and rename the method binds
ASSERT( null sigs )
@@ -481,6 +489,7 @@ renameDeriv is_boot inst_infos bagBinds
do { (rn_binds, fvs) <- rnMethodBinds (is_cls_nm inst) (\_ -> []) binds
; let binds' = InstBindings { ib_binds = rn_binds
, ib_pragmas = []
+ , ib_extensions = exts
, ib_standalone_deriving = sa }
; return (inst_info { iBinds = binds' }, fvs) }
where
@@ -521,66 +530,60 @@ makeDerivSpecs :: Bool
-> [LDerivDecl Name]
-> TcM [EarlyDerivSpec]
makeDerivSpecs is_boot tycl_decls inst_decls deriv_decls
- = do { eqns1 <- concatMapM (recoverM (return []) . deriveTyDecl) tycl_decls
- ; eqns2 <- concatMapM (recoverM (return []) . deriveInstDecl) inst_decls
- ; eqns3 <- mapAndRecoverM deriveStandalone deriv_decls
- ; let eqns = eqns1 ++ eqns2 ++ eqns3
+ = do { eqns1 <- concatMapM (recoverM (return []) . deriveTyDecl) tycl_decls
+ ; eqns2 <- concatMapM (recoverM (return []) . deriveInstDecl) inst_decls
+ ; eqns3 <- concatMapM (recoverM (return []) . deriveStandalone) deriv_decls
-- If AutoDeriveTypeable is set, we automatically add Typeable instances
-- for every data type and type class declared in the module
- ; isAutoTypeable <- xoptM Opt_AutoDeriveTypeable
- ; let eqns4 = if isAutoTypeable then deriveTypeable tycl_decls eqns else []
- ; eqns4' <- mapAndRecoverM deriveStandalone eqns4
- ; let eqns' = eqns ++ eqns4'
+ ; auto_typeable <- xoptM Opt_AutoDeriveTypeable
+ ; eqns4 <- deriveAutoTypeable auto_typeable (eqns1 ++ eqns3) tycl_decls
+
+ ; let eqns = eqns1 ++ eqns2 ++ eqns3 ++ eqns4
; if is_boot then -- No 'deriving' at all in hs-boot files
- do { unless (null eqns') (add_deriv_err (head eqns'))
+ do { unless (null eqns) (add_deriv_err (head eqns))
; return [] }
- else return eqns' }
+ else return eqns }
where
- deriveTypeable :: [LTyClDecl Name] -> [EarlyDerivSpec] -> [LDerivDecl Name]
- deriveTypeable tys dss =
- [ L l (DerivDecl (L l (HsAppTy (noLoc (HsTyVar typeableClassName))
- (L l (HsTyVar (tcdName t))))))
- | L l t <- tys
- -- Don't add Typeable instances for type synonyms and type families
- , not (isSynDecl t), not (isTypeFamilyDecl t)
- -- ... nor if the user has already given a deriving clause
- , not (hasInstance (tcdName t) dss) ]
-
- -- Check if an automatically generated DS for deriving Typeable should be
- -- ommitted because the user had manually requested for an instance
- hasInstance :: Name -> [EarlyDerivSpec] -> Bool
- hasInstance n = any (\ds -> n == tyConName (earlyDSTyCon ds))
-
add_deriv_err eqn
= setSrcSpan (earlyDSLoc eqn) $
addErr (hang (ptext (sLit "Deriving not permitted in hs-boot file"))
2 (ptext (sLit "Use an instance declaration instead")))
+deriveAutoTypeable :: Bool -> [EarlyDerivSpec] -> [LTyClDecl Name] -> TcM [EarlyDerivSpec]
+-- Runs over *all* TyCl declarations, including classes and data families
+-- i.e. not just data type decls
+deriveAutoTypeable auto_typeable done_specs tycl_decls
+ | not auto_typeable = return []
+ | otherwise = do { cls <- tcLookupClass typeableClassName
+ ; concatMapM (do_one cls) tycl_decls }
+ where
+ done_tcs = mkNameSet [ tyConName (earlyDSTyCon spec)
+ | spec <- done_specs
+ , className (earlyDSClass spec) == typeableClassName ]
+ -- Check if an automatically generated DS for deriving Typeable should be
+ -- omitted because the user had manually requested an instance
+
+ do_one cls (L _ decl)
+ = do { tc <- tcLookupTyCon (tcdName decl)
+ ; if (isSynTyCon tc || tyConName tc `elemNameSet` done_tcs)
+ -- Do not derive Typeable for type synonyms or type families
+ then return []
+ else mkPolyKindedTypeableEqn cls tc }
+
------------------------------------------------------------------
deriveTyDecl :: LTyClDecl Name -> TcM [EarlyDerivSpec]
-deriveTyDecl (L _ decl@(DataDecl { tcdLName = L loc tc_name
+deriveTyDecl (L _ decl@(DataDecl { tcdLName = L _ tc_name
, tcdDataDefn = HsDataDefn { dd_derivs = preds } }))
= tcAddDeclCtxt decl $
do { tc <- tcLookupTyCon tc_name
; let tvs = tyConTyVars tc
tys = mkTyVarTys tvs
- pdcs :: [LDerivDecl Name]
- pdcs = [ L loc (DerivDecl (L loc (HsAppTy (noLoc (HsTyVar typeableClassName))
- (L loc (HsTyVar (tyConName pdc))))))
- | Just pdc <- map promoteDataCon_maybe (tyConDataCons tc) ]
- -- If AutoDeriveTypeable and DataKinds is set, we add Typeable instances
- -- for every promoted data constructor of datatypes in this module
- ; isAutoTypeable <- xoptM Opt_AutoDeriveTypeable
- ; isDataKinds <- xoptM Opt_DataKinds
- ; prom_dcs_Typeable_instances <- if isAutoTypeable && isDataKinds
- then mapM deriveStandalone pdcs
- else return []
- ; other_instances <- case preds of
- Just preds' -> mapM (deriveTyData tvs tc tys) preds'
- Nothing -> return []
- ; return (prom_dcs_Typeable_instances ++ other_instances) }
+
+ ; case preds of
+ Just preds' -> concatMapM (deriveTyData False tvs tc tys) preds'
+ Nothing -> return [] }
deriveTyDecl _ = return []
@@ -595,32 +598,49 @@ deriveInstDecl (L _ (ClsInstD { cid_inst = ClsInstDecl { cid_datafam_insts = fam
------------------------------------------------------------------
deriveFamInst :: DataFamInstDecl Name -> TcM [EarlyDerivSpec]
deriveFamInst decl@(DataFamInstDecl { dfid_tycon = L _ tc_name, dfid_pats = pats
- , dfid_defn = HsDataDefn { dd_derivs = Just preds } })
+ , dfid_defn = defn@(HsDataDefn { dd_derivs = Just preds }) })
= tcAddDataFamInstCtxt decl $
do { fam_tc <- tcLookupTyCon tc_name
- ; tcFamTyPats tc_name (tyConKind fam_tc) pats (\_ -> return ()) $
+ ; tcFamTyPats (famTyConShape fam_tc) pats (kcDataDefn defn) $
+ -- kcDataDefn defn: see Note [Finding the LHS patterns]
\ tvs' pats' _ ->
- mapM (deriveTyData tvs' fam_tc pats') preds }
- -- Tiresomely we must figure out the "lhs", which is awkward for type families
- -- E.g. data T a b = .. deriving( Eq )
- -- Here, the lhs is (T a b)
- -- data instance TF Int b = ... deriving( Eq )
- -- Here, the lhs is (TF Int b)
- -- But if we just look up the tycon_name, we get is the *family*
- -- tycon, but not pattern types -- they are in the *rep* tycon.
+ concatMapM (deriveTyData True tvs' fam_tc pats') preds }
deriveFamInst _ = return []
+\end{code}
+
+Note [Finding the LHS patterns]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When kind polymorphism is in play, we need to be careful. Here is
+Trac #9359:
+ data Cmp a where
+ Sup :: Cmp a
+ V :: a -> Cmp a
+
+ data family CmpInterval (a :: Cmp k) (b :: Cmp k) :: *
+ data instance CmpInterval (V c) Sup = Starting c deriving( Show )
+
+So CmpInterval is kind-polymorphic, but the data instance is not
+ CmpInterval :: forall k. Cmp k -> Cmp k -> *
+ data instance CmpInterval * (V (c::*)) Sup = Starting c deriving( Show )
+Hence, when deriving the type patterns in deriveFamInst, we must kind
+check the RHS (the data constructor 'Starting c') as well as the LHS,
+so that we correctly see the instantiation to *.
+
+
+\begin{code}
------------------------------------------------------------------
-deriveStandalone :: LDerivDecl Name -> TcM EarlyDerivSpec
+deriveStandalone :: LDerivDecl Name -> TcM [EarlyDerivSpec]
-- Standalone deriving declarations
-- e.g. deriving instance Show a => Show (T a)
-- Rather like tcLocalInstDecl
-deriveStandalone (L loc (DerivDecl deriv_ty))
+deriveStandalone (L loc (DerivDecl deriv_ty overlap_mode))
= setSrcSpan loc $
addErrCtxt (standaloneCtxt deriv_ty) $
do { traceTc "Standalone deriving decl for" (ppr deriv_ty)
- ; (tvs, theta, cls, inst_tys) <- tcHsInstHead TcType.InstDeclCtxt deriv_ty
+ ; (tvs, theta, cls, inst_tys) <- setXOptM Opt_DataKinds $ -- for polykinded typeable
+ tcHsInstHead TcType.InstDeclCtxt deriv_ty
; traceTc "Standalone deriving;" $ vcat
[ text "tvs:" <+> ppr tvs
, text "theta:" <+> ppr theta
@@ -637,25 +657,74 @@ deriveStandalone (L loc (DerivDecl deriv_ty))
, text "type:" <+> ppr inst_ty ]
; case tcSplitTyConApp_maybe inst_ty of
- Just (tycon, tc_args)
- | className cls == typeableClassName || isAlgTyCon tycon
- -> mkEqnHelp tvs cls cls_tys tycon tc_args (Just theta)
+ Just (tc, tc_args)
+ | className cls == typeableClassName -- Works for algebraic TyCons
+ -- _and_ data families
+ -> do { check_standalone_typeable theta tc tc_args
+ ; mkPolyKindedTypeableEqn cls tc }
+
+ | isAlgTyCon tc -- All other classes
+ -> do { spec <- mkEqnHelp overlap_mode tvs cls cls_tys tc tc_args (Just theta)
+ ; return [spec] }
_ -> -- Complain about functions, primitive types, etc,
-- except for the Typeable class
failWithTc $ derivingThingErr False cls cls_tys inst_ty $
ptext (sLit "The last argument of the instance must be a data or newtype application")
}
+ where
+ check_standalone_typeable theta tc tc_args
+ -- We expect to see
+ -- deriving Typeable T
+ -- for some tycon T. But if S is kind-polymorphic,
+ -- say (S :: forall k. k -> *), we might see
+ -- deriving Typeable (S k)
+ --
+ -- But we should NOT see
+ -- deriving Typeable (T Int)
+ -- or deriving Typeable (S *) where S is kind-polymorphic
+ --
+ -- So all the tc_args should be distinct kind variables
+ | null theta
+ , allDistinctTyVars tc_args
+ , all is_kind_var tc_args
+ = return ()
+
+ | otherwise
+ = do { polykinds <- xoptM Opt_PolyKinds
+ ; failWith (mk_msg polykinds theta tc tc_args) }
+
+ is_kind_var tc_arg = case tcGetTyVar_maybe tc_arg of
+ Just v -> isKindVar v
+ Nothing -> False
+
+ mk_msg polykinds theta tc tc_args
+ | not polykinds
+ , all isKind tc_args -- Non-empty, all kinds, at least one not a kind variable
+ , null theta
+ = hang (ptext (sLit "To make a Typeable instance of poly-kinded")
+ <+> quotes (ppr tc) <> comma)
+ 2 (ptext (sLit "use XPolyKinds"))
+
+ | otherwise
+ = hang (ptext (sLit "Derived Typeable instance must be of form"))
+ 2 (ptext (sLit "deriving instance Typeable") <+> ppr tc)
+
------------------------------------------------------------------
-deriveTyData :: [TyVar] -> TyCon -> [Type] -- LHS of data or data instance
+deriveTyData :: Bool -- False <=> data/newtype
+ -- True <=> data/newtype *instance*
+ -> [TyVar] -> TyCon -> [Type] -- LHS of data or data instance
+ -- Can be a data instance, hence [Type] args
-> LHsType Name -- The deriving predicate
- -> TcM EarlyDerivSpec
+ -> TcM [EarlyDerivSpec]
-- The deriving clause of a data or newtype declaration
-deriveTyData tvs tc tc_args (L loc deriv_pred)
+-- I.e. not standalone deriving
+deriveTyData is_instance tvs tc tc_args (L loc deriv_pred)
= setSrcSpan loc $ -- Use the location of the 'deriving' item
- do { (deriv_tvs, cls, cls_tys) <- tcExtendTyVarEnv tvs $
- tcHsDeriv deriv_pred
+ do { (deriv_tvs, cls, cls_tys, cls_arg_kind)
+ <- tcExtendTyVarEnv tvs $
+ tcHsDeriv deriv_pred
-- Deriving preds may (now) mention
-- the type variables for the type constructor, hence tcExtendTyVarenv
-- The "deriv_pred" is a LHsType to take account of the fact that for
@@ -665,43 +734,47 @@ deriveTyData tvs tc tc_args (L loc deriv_pred)
-- so the argument kind 'k' is not decomposable by splitKindFunTys
-- as is the case for all other derivable type classes
; if className cls == typeableClassName
- then derivePolyKindedTypeable cls cls_tys tvs tc tc_args
- else do {
-
- -- Given data T a b c = ... deriving( C d ),
- -- we want to drop type variables from T so that (C d (T a)) is well-kinded
- ; let cls_tyvars = classTyVars cls
- ; checkTc (not (null cls_tyvars)) derivingNullaryErr
+ then derivePolyKindedTypeable is_instance cls cls_tys tvs tc tc_args
+ else
- ; let kind = tyVarKind (last cls_tyvars)
- (arg_kinds, _) = splitKindFunTys kind
+ do { -- Given data T a b c = ... deriving( C d ),
+ -- we want to drop type variables from T so that (C d (T a)) is well-kinded
+ let (arg_kinds, _) = splitKindFunTys cls_arg_kind
n_args_to_drop = length arg_kinds
n_args_to_keep = tyConArity tc - n_args_to_drop
args_to_drop = drop n_args_to_keep tc_args
tc_args_to_keep = take n_args_to_keep tc_args
inst_ty_kind = typeKind (mkTyConApp tc tc_args_to_keep)
dropped_tvs = tyVarsOfTypes args_to_drop
- tv_set = mkVarSet tvs
- mb_match = tcMatchTy tv_set inst_ty_kind kind
- Just subst = mb_match -- See Note [Match kinds in deriving]
- final_tc_args = substTys subst tc_args_to_keep
- univ_tvs = mkVarSet deriv_tvs `unionVarSet` tyVarsOfTypes final_tc_args
-
- ; traceTc "derivTyData1" (vcat [ pprTvBndrs tvs, ppr tc, ppr tc_args
+ -- Match up the kinds, and apply the resulting kind substitution
+ -- to the types. See Note [Unify kinds in deriving]
+ -- We are assuming the tycon tyvars and the class tyvars are distinct
+ mb_match = tcUnifyTy inst_ty_kind cls_arg_kind
+ Just kind_subst = mb_match
+ (univ_kvs, univ_tvs) = partition isKindVar $ varSetElems $
+ mkVarSet deriv_tvs `unionVarSet`
+ tyVarsOfTypes tc_args_to_keep
+ univ_kvs' = filter (`notElemTvSubst` kind_subst) univ_kvs
+ (subst', univ_tvs') = mapAccumL substTyVarBndr kind_subst univ_tvs
+ final_tc_args = substTys subst' tc_args_to_keep
+ final_cls_tys = substTys subst' cls_tys
+
+ ; traceTc "derivTyData1" (vcat [ pprTvBndrs tvs, ppr tc, ppr tc_args, ppr deriv_pred
, pprTvBndrs (varSetElems $ tyVarsOfTypes tc_args)
, ppr n_args_to_keep, ppr n_args_to_drop
- , ppr inst_ty_kind, ppr kind, ppr mb_match ])
+ , ppr inst_ty_kind, ppr cls_arg_kind, ppr mb_match
+ , ppr final_tc_args, ppr final_cls_tys ])
-- Check that the result really is well-kinded
; checkTc (n_args_to_keep >= 0 && isJust mb_match)
- (derivingKindErr tc cls cls_tys kind)
+ (derivingKindErr tc cls cls_tys cls_arg_kind)
; traceTc "derivTyData2" (vcat [ ppr univ_tvs ])
- ; checkTc (allDistinctTyVars args_to_drop && -- (a) and (b)
- univ_tvs `disjointVarSet` dropped_tvs) -- (c)
- (derivingEtaErr cls cls_tys (mkTyConApp tc final_tc_args))
+ ; checkTc (allDistinctTyVars args_to_drop && -- (a) and (b)
+ not (any (`elemVarSet` dropped_tvs) univ_tvs)) -- (c)
+ (derivingEtaErr cls final_cls_tys (mkTyConApp tc final_tc_args))
-- Check that
-- (a) The args to drop are all type variables; eg reject:
-- data instance T a Int = .... deriving( Monad )
@@ -713,36 +786,32 @@ deriveTyData tvs tc tc_args (L loc deriv_pred)
-- newtype T a s = ... deriving( ST s )
-- newtype K a a = ... deriving( Monad )
- ; mkEqnHelp (varSetElemsKvsFirst univ_tvs)
- cls cls_tys tc final_tc_args Nothing } }
+ ; spec <- mkEqnHelp Nothing (univ_kvs' ++ univ_tvs')
+ cls final_cls_tys tc final_tc_args Nothing
+ ; return [spec] } }
-derivePolyKindedTypeable :: Class -> [Type]
+derivePolyKindedTypeable :: Bool -> Class -> [Type]
-> [TyVar] -> TyCon -> [Type]
- -> TcM EarlyDerivSpec
-derivePolyKindedTypeable cls cls_tys _tvs tc tc_args
- = do { checkTc (isSingleton cls_tys) $ -- Typeable k
+ -> TcM [EarlyDerivSpec]
+-- The deriving( Typeable ) clause of a data/newtype decl
+-- I.e. not standalone deriving
+derivePolyKindedTypeable is_instance cls cls_tys _tvs tc tc_args
+ | is_instance
+ = failWith (sep [ ptext (sLit "Deriving Typeable is not allowed for family instances;")
+ , ptext (sLit "derive Typeable for")
+ <+> quotes (pprSourceTyCon tc)
+ <+> ptext (sLit "alone") ])
+
+ | otherwise
+ = ASSERT( allDistinctTyVars tc_args ) -- Came from a data/newtype decl
+ do { checkTc (isSingleton cls_tys) $ -- Typeable k
derivingThingErr False cls cls_tys (mkTyConApp tc tc_args)
(classArgsErr cls cls_tys)
- -- Check that we have not said, for example
- -- deriving Typeable (T Int)
- -- or deriving Typeable (S :: * -> *) where S is kind-polymorphic
- ; checkTc (allDistinctTyVars tc_args) $
- derivingEtaErr cls cls_tys (mkTyConApp tc tc_kind_args)
-
- ; mkEqnHelp kind_vars cls cls_tys tc tc_kind_args Nothing }
- where
- kind_vars = kindVarsOnly tc_args
- tc_kind_args = mkTyVarTys kind_vars
-
- kindVarsOnly :: [Type] -> [KindVar]
- kindVarsOnly [] = []
- kindVarsOnly (t:ts) | Just v <- getTyVar_maybe t
- , isKindVar v = v : kindVarsOnly ts
- | otherwise = kindVarsOnly ts
+ ; mkPolyKindedTypeableEqn cls tc }
\end{code}
-Note [Match kinds in deriving]
+Note [Unify kinds in deriving]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider (Trac #8534)
data T a b = MkT a deriving( Functor )
@@ -752,16 +821,55 @@ So T :: forall k. * -> k -> *. We want to get
instance Functor (T * (a:*)) where ...
Notice the '*' argument to T.
-So we need to (a) drop arguments from (T a b) to match the number of
-arrows in the (last argument of the) class; and then match kind of the
-remaining type against the expected kind, to figur out how to
-instantiate T's kind arguments. Hence we match
- kind( T k (a:k) ) ~ (* -> *)
-to find k:=*. Tricky stuff.
+Moreover, as well as instantiating T's kind arguments, we may need to instantiate
+C's kind args. Consider (Trac #8865):
+ newtype T a b = MkT (Either a b) deriving( Category )
+where
+ Category :: forall k. (k -> k -> *) -> Constraint
+We need to generate the instance
+ instance Category * (Either a) where ...
+Notice the '*' argument to Category.
+
+So we need to
+ * drop arguments from (T a b) to match the number of
+ arrows in the (last argument of the) class;
+ * and then *unify* kind of the remaining type against the
+ expected kind, to figure out how to instantiate C's and T's
+ kind arguments.
+
+In the two examples,
+ * we unify kind-of( T k (a:k) ) ~ kind-of( Functor )
+ i.e. (k -> *) ~ (* -> *) to find k:=*.
+ yielding k:=*
+
+ * we unify kind-of( Either ) ~ kind-of( Category )
+ i.e. (* -> * -> *) ~ (k -> k -> k)
+ yielding k:=*
+
+Now we get a kind substitution. We then need to:
+
+ 1. Remove the substituted-out kind variables from the quantified kind vars
+
+ 2. Apply the substitution to the kinds of quantified *type* vars
+ (and extend the substitution to reflect this change)
+
+ 3. Apply that extended substitution to the non-dropped args (types and
+ kinds) of the type and class
+
+Forgetting step (2) caused Trac #8893:
+ data V a = V [a] deriving Functor
+ data P (x::k->*) (a:k) = P (x a) deriving Functor
+ data C (x::k->*) (a:k) = C (V (P x a)) deriving Functor
+
+When deriving Functor for P, we unify k to *, but we then want
+an instance $df :: forall (x:*->*). Functor x => Functor (P * (x:*->*))
+and similarly for C. Notice the modified kind of x, both at binding
+and occurrence sites.
\begin{code}
-mkEqnHelp :: [TyVar]
+mkEqnHelp :: Maybe OverlapMode
+ -> [TyVar]
-> Class -> [Type]
-> TyCon -> [Type]
-> DerivContext -- Just => context supplied (standalone deriving)
@@ -772,18 +880,12 @@ mkEqnHelp :: [TyVar]
-- where the 'theta' is optional (that's the Maybe part)
-- Assumes that this declaration is well-kinded
-mkEqnHelp tvs cls cls_tys tycon tc_args mtheta
+mkEqnHelp overlap_mode tvs cls cls_tys tycon tc_args mtheta
| className cls `elem` oldTypeableClassNames
= do { dflags <- getDynFlags
; case checkOldTypeableConditions (dflags, tycon, tc_args) of
- Just err -> bale_out err
- Nothing -> mkOldTypeableEqn tvs cls tycon tc_args mtheta }
-
- | className cls == typeableClassName -- Polykinded Typeable
- = do { dflags <- getDynFlags
- ; case checkTypeableConditions (dflags, tycon, tc_args) of
- Just err -> bale_out err
- Nothing -> mkPolyKindedTypeableEqn tvs cls tycon tc_args mtheta }
+ NotValid err -> bale_out err
+ IsValid -> mkOldTypeableEqn tvs cls tycon tc_args mtheta }
| otherwise
= do { (rep_tc, rep_tc_args) <- lookup_data_fam tycon tc_args
@@ -814,10 +916,10 @@ mkEqnHelp tvs cls cls_tys tycon tc_args mtheta
; dflags <- getDynFlags
; if isDataTyCon rep_tc then
- mkDataTypeEqn dflags tvs cls cls_tys
+ mkDataTypeEqn dflags overlap_mode tvs cls cls_tys
tycon tc_args rep_tc rep_tc_args mtheta
else
- mkNewTypeEqn dflags tvs cls cls_tys
+ mkNewTypeEqn dflags overlap_mode tvs cls cls_tys
tycon tc_args rep_tc rep_tc_args mtheta }
where
bale_out msg = failWithTc (derivingThingErr False cls cls_tys (mkTyConApp tycon tc_args) msg)
@@ -907,6 +1009,7 @@ See Note [Eta reduction for data family axioms] in TcInstDcls.
\begin{code}
mkDataTypeEqn :: DynFlags
+ -> Maybe OverlapMode
-> [Var] -- Universally quantified type variables in the instance
-> Class -- Class for which we need to derive an instance
-> [Type] -- Other parameters to the class except the last
@@ -918,7 +1021,7 @@ mkDataTypeEqn :: DynFlags
-> DerivContext -- Context of the instance, for standalone deriving
-> TcRn EarlyDerivSpec -- Return 'Nothing' if error
-mkDataTypeEqn dflags tvs cls cls_tys
+mkDataTypeEqn dflags overlap_mode tvs cls cls_tys
tycon tc_args rep_tc rep_tc_args mtheta
= case checkSideConditions dflags mtheta cls cls_tys rep_tc rep_tc_args of
-- NB: pass the *representation* tycon to checkSideConditions
@@ -926,13 +1029,13 @@ mkDataTypeEqn dflags tvs cls cls_tys
NonDerivableClass -> bale_out (nonStdErr cls)
DerivableClassError msg -> bale_out msg
where
- go_for_it = mk_data_eqn tvs cls tycon tc_args rep_tc rep_tc_args mtheta
+ go_for_it = mk_data_eqn overlap_mode tvs cls tycon tc_args rep_tc rep_tc_args mtheta
bale_out msg = failWithTc (derivingThingErr False cls cls_tys (mkTyConApp tycon tc_args) msg)
-mk_data_eqn :: [TyVar] -> Class
+mk_data_eqn :: Maybe OverlapMode -> [TyVar] -> Class
-> TyCon -> [TcType] -> TyCon -> [TcType] -> DerivContext
-> TcM EarlyDerivSpec
-mk_data_eqn tvs cls tycon tc_args rep_tc rep_tc_args mtheta
+mk_data_eqn overlap_mode tvs cls tycon tc_args rep_tc rep_tc_args mtheta
= do loc <- getSrcSpanM
dfun_name <- new_dfun_name cls tycon
case mtheta of
@@ -944,6 +1047,7 @@ mk_data_eqn tvs cls tycon tc_args rep_tc rep_tc_args mtheta
, ds_cls = cls, ds_tys = inst_tys
, ds_tc = rep_tc, ds_tc_args = rep_tc_args
, ds_theta = inferred_constraints
+ , ds_overlap = overlap_mode
, ds_newtype = False }
Just theta -> do -- Specified context
return $ GivenTheta $ DS
@@ -952,6 +1056,7 @@ mk_data_eqn tvs cls tycon tc_args rep_tc rep_tc_args mtheta
, ds_cls = cls, ds_tys = inst_tys
, ds_tc = rep_tc, ds_tc_args = rep_tc_args
, ds_theta = theta
+ , ds_overlap = overlap_mode
, ds_newtype = False }
where
inst_tys = [mkTyConApp tycon tc_args]
@@ -989,45 +1094,41 @@ mkOldTypeableEqn tvs cls tycon tc_args mtheta
DS { ds_loc = loc, ds_name = dfun_name, ds_tvs = []
, ds_cls = cls, ds_tys = [mkTyConApp tycon []]
, ds_tc = tycon, ds_tc_args = []
- , ds_theta = mtheta `orElse` [], ds_newtype = False }) }
+ , ds_theta = mtheta `orElse` []
+ , ds_overlap = Nothing -- Or, Just NoOverlap?
+ , ds_newtype = False }) }
-mkPolyKindedTypeableEqn :: [TyVar] -> Class
- -> TyCon -> [TcType] -> DerivContext
- -> TcM EarlyDerivSpec
+mkPolyKindedTypeableEqn :: Class -> TyCon -> TcM [EarlyDerivSpec]
-- We can arrive here from a 'deriving' clause
-- or from standalone deriving
-mkPolyKindedTypeableEqn tvs cls tycon tc_args mtheta
- = do { -- Check that we have not said, for example
- -- deriving Typeable (T Int)
- -- or deriving Typeable (S :: * -> *) where S is kind-polymorphic
-
- polykinds <- xoptM Opt_PolyKinds
- ; checkTc (all is_kind_var tc_args) (mk_msg polykinds)
- ; dfun_name <- new_dfun_name cls tycon
- ; loc <- getSrcSpanM
- ; let tc_app = mkTyConApp tycon tc_args
- ; return (GivenTheta $
- DS { ds_loc = loc, ds_name = dfun_name
- , ds_tvs = filter isKindVar tvs, ds_cls = cls
- , ds_tys = typeKind tc_app : [tc_app]
- -- Remember, Typeable :: forall k. k -> *
- , ds_tc = tycon, ds_tc_args = tc_args
- , ds_theta = mtheta `orElse` [] -- Context is empty for polykinded Typeable
- , ds_newtype = False }) }
+mkPolyKindedTypeableEqn cls tc
+ = do { dflags <- getDynFlags -- It's awkward to re-use checkFlag here,
+ ; checkTc(xopt Opt_DeriveDataTypeable dflags) -- so we do a DIY job
+ (hang (ptext (sLit "Can't make a Typeable instance of") <+> quotes (ppr tc))
+ 2 (ptext (sLit "You need DeriveDataTypeable to derive Typeable instances")))
+
+ ; loc <- getSrcSpanM
+ ; let prom_dcs = mapMaybe promoteDataCon_maybe (tyConDataCons tc)
+ ; mapM (mk_one loc) (tc : prom_dcs) }
where
- is_kind_var tc_arg = case tcGetTyVar_maybe tc_arg of
- Just v -> isKindVar v
- Nothing -> False
-
- mk_msg polykinds | not polykinds
- , all isKind tc_args -- Non-empty, all kinds, at least one not a kind variable
- = hang (ptext (sLit "To make a Typeable instance of poly-kinded")
- <+> quotes (ppr tycon) <> comma)
- 2 (ptext (sLit "use XPolyKinds"))
- | otherwise
- = ptext (sLit "Derived Typeable instance must be of form")
- <+> parens (ptext (sLit "Typeable") <+> ppr tycon)
-
+ mk_one loc tc = do { traceTc "mkPolyKindedTypeableEqn" (ppr tc)
+ ; dfun_name <- new_dfun_name cls tc
+ ; return $ GivenTheta $
+ DS { ds_loc = loc, ds_name = dfun_name
+ , ds_tvs = kvs, ds_cls = cls
+ , ds_tys = [tc_app_kind, tc_app]
+ -- Remember, Typeable :: forall k. k -> *
+ -- so we must instantiate it appropriately
+ , ds_tc = tc, ds_tc_args = tc_args
+ , ds_theta = [] -- Context is empty for polykinded Typeable
+ , ds_overlap = Nothing
+ -- Perhaps this should be `Just NoOverlap`?
+
+ , ds_newtype = False } }
+ where
+ (kvs,tc_app_kind) = splitForAllTys (tyConKind tc)
+ tc_args = mkTyVarTys kvs
+ tc_app = mkTyConApp tc tc_args
inferConstraints :: Class -> [TcType]
-> TyCon -> [TcType]
@@ -1046,21 +1147,23 @@ inferConstraints cls inst_tys rep_tc rep_tc_args
| otherwise -- The others are a bit more complicated
= ASSERT2( equalLength rep_tc_tvs all_rep_tc_args, ppr cls <+> ppr rep_tc )
- return (stupid_constraints ++ extra_constraints
- ++ sc_constraints
- ++ con_arg_constraints cls get_std_constrained_tys)
-
+ do { traceTc "inferConstraints" (vcat [ppr cls <+> ppr inst_tys, ppr arg_constraints])
+ ; return (stupid_constraints ++ extra_constraints
+ ++ sc_constraints
+ ++ arg_constraints) }
where
+ arg_constraints = con_arg_constraints cls get_std_constrained_tys
+
-- Constraints arising from the arguments of each constructor
con_arg_constraints cls' get_constrained_tys
- = [ mkPredOrigin (DerivOriginDC data_con arg_n) (mkClassPred cls' [arg_ty])
- | data_con <- tyConDataCons rep_tc,
- (arg_n, arg_ty) <-
- ASSERT( isVanillaDataCon data_con )
- zip [1..] $
- get_constrained_tys $
- dataConInstOrigArgTys data_con all_rep_tc_args,
- not (isUnLiftedType arg_ty) ]
+ = [ mkPredOrigin (DerivOriginDC data_con arg_n) (mkClassPred cls' [inner_ty])
+ | data_con <- tyConDataCons rep_tc
+ , (arg_n, arg_ty) <- ASSERT( isVanillaDataCon data_con )
+ zip [1..] $ -- ASSERT is precondition of dataConInstOrigArgTys
+ dataConInstOrigArgTys data_con all_rep_tc_args
+ , not (isUnLiftedType arg_ty)
+ , inner_ty <- get_constrained_tys arg_ty ]
+
-- No constraints for unlifted types
-- See Note [Deriving and unboxed types]
@@ -1070,10 +1173,10 @@ inferConstraints cls inst_tys rep_tc rep_tc_args
-- (b) The rep_tc_args will be one short
is_functor_like = getUnique cls `elem` functorLikeClassKeys
- get_std_constrained_tys :: [Type] -> [Type]
- get_std_constrained_tys tys
- | is_functor_like = concatMap (deepSubtypesContaining last_tv) tys
- | otherwise = tys
+ get_std_constrained_tys :: Type -> [Type]
+ get_std_constrained_tys ty
+ | is_functor_like = deepSubtypesContaining last_tv ty
+ | otherwise = [ty]
rep_tc_tvs = tyConTyVars rep_tc
last_tv = last rep_tc_tvs
@@ -1141,18 +1244,17 @@ checkSideConditions :: DynFlags -> DerivContext -> Class -> [TcType]
checkSideConditions dflags mtheta cls cls_tys rep_tc rep_tc_args
| Just cond <- sideConditions mtheta cls
= case (cond (dflags, rep_tc, rep_tc_args)) of
- Just err -> DerivableClassError err -- Class-specific error
- Nothing | null cls_tys -> CanDerive -- All derivable classes are unary, so
- -- cls_tys (the type args other than last)
- -- should be null
+ NotValid err -> DerivableClassError err -- Class-specific error
+ IsValid | null cls_tys -> CanDerive -- All derivable classes are unary, so
+ -- cls_tys (the type args other than last)
+ -- should be null
| otherwise -> DerivableClassError (classArgsErr cls cls_tys) -- e.g. deriving( Eq s )
| otherwise = NonDerivableClass -- Not a standard class
classArgsErr :: Class -> [Type] -> SDoc
classArgsErr cls cls_tys = quotes (ppr (mkClassPred cls cls_tys)) <+> ptext (sLit "is not a class")
-checkTypeableConditions, checkOldTypeableConditions :: Condition
-checkTypeableConditions = checkFlag Opt_DeriveDataTypeable `andCond` cond_TypeableOK
+checkOldTypeableConditions :: Condition
checkOldTypeableConditions = checkFlag Opt_DeriveDataTypeable `andCond` cond_oldTypeableOK
nonStdErr :: Class -> SDoc
@@ -1168,23 +1270,32 @@ sideConditions mtheta cls
| cls_key == ixClassKey = Just (cond_std `andCond` cond_enumOrProduct cls)
| cls_key == boundedClassKey = Just (cond_std `andCond` cond_enumOrProduct cls)
| cls_key == dataClassKey = Just (checkFlag Opt_DeriveDataTypeable `andCond`
- cond_std `andCond` cond_args cls)
+ cond_std `andCond`
+ cond_args cls)
| cls_key == functorClassKey = Just (checkFlag Opt_DeriveFunctor `andCond`
- cond_functorOK True) -- NB: no cond_std!
+ cond_vanilla `andCond`
+ cond_functorOK True)
| cls_key == foldableClassKey = Just (checkFlag Opt_DeriveFoldable `andCond`
+ cond_vanilla `andCond`
cond_functorOK False) -- Functor/Fold/Trav works ok for rank-n types
| cls_key == traversableClassKey = Just (checkFlag Opt_DeriveTraversable `andCond`
+ cond_vanilla `andCond`
cond_functorOK False)
- | cls_key == genClassKey = Just (cond_RepresentableOk `andCond`
- checkFlag Opt_DeriveGeneric)
- | cls_key == gen1ClassKey = Just (cond_Representable1Ok `andCond`
- checkFlag Opt_DeriveGeneric)
+ | cls_key == genClassKey = Just (checkFlag Opt_DeriveGeneric `andCond`
+ cond_vanilla `andCond`
+ cond_RepresentableOk)
+ | cls_key == gen1ClassKey = Just (checkFlag Opt_DeriveGeneric `andCond`
+ cond_vanilla `andCond`
+ cond_Representable1Ok)
| otherwise = Nothing
where
cls_key = getUnique cls
- cond_std = cond_stdOK mtheta
+ cond_std = cond_stdOK mtheta False -- Vanilla data constructors, at least one,
+ -- and monotype arguments
+ cond_vanilla = cond_stdOK mtheta True -- Vanilla data constructors but
+ -- allow no data cons or polytype arguments
-type Condition = (DynFlags, TyCon, [Type]) -> Maybe SDoc
+type Condition = (DynFlags, TyCon, [Type]) -> Validity
-- first Bool is whether or not we are allowed to derive Data and Typeable
-- second Bool is whether or not we are allowed to derive Functor
-- TyCon is the *representation* tycon if the data type is an indexed one
@@ -1193,37 +1304,42 @@ type Condition = (DynFlags, TyCon, [Type]) -> Maybe SDoc
orCond :: Condition -> Condition -> Condition
orCond c1 c2 tc
- = case c1 tc of
- Nothing -> Nothing -- c1 succeeds
- Just x -> case c2 tc of -- c1 fails
- Nothing -> Nothing
- Just y -> Just (x $$ ptext (sLit " or") $$ y)
- -- Both fail
+ = case (c1 tc, c2 tc) of
+ (IsValid, _) -> IsValid -- c1 succeeds
+ (_, IsValid) -> IsValid -- c2 succeeds
+ (NotValid x, NotValid y) -> NotValid (x $$ ptext (sLit " or") $$ y)
+ -- Both fail
andCond :: Condition -> Condition -> Condition
-andCond c1 c2 tc = case c1 tc of
- Nothing -> c2 tc -- c1 succeeds
- Just x -> Just x -- c1 fails
-
-cond_stdOK :: DerivContext -> Condition
-cond_stdOK (Just _) _
- = Nothing -- Don't check these conservative conditions for
+andCond c1 c2 tc = c1 tc `andValid` c2 tc
+
+cond_stdOK :: DerivContext -- Says whether this is standalone deriving or not;
+ -- if standalone, we just say "yes, go for it"
+ -> Bool -- True <=> permissive: allow higher rank
+ -- args and no data constructors
+ -> Condition
+cond_stdOK (Just _) _ _
+ = IsValid -- Don't check these conservative conditions for
-- standalone deriving; just generate the code
-- and let the typechecker handle the result
-cond_stdOK Nothing (_, rep_tc, _)
- | null data_cons = Just (no_cons_why rep_tc $$ suggestion)
- | not (null con_whys) = Just (vcat con_whys $$ suggestion)
- | otherwise = Nothing
+cond_stdOK Nothing permissive (_, rep_tc, _)
+ | null data_cons
+ , not permissive = NotValid (no_cons_why rep_tc $$ suggestion)
+ | not (null con_whys) = NotValid (vcat con_whys $$ suggestion)
+ | otherwise = IsValid
where
- suggestion = ptext (sLit "Possible fix: use a standalone deriving declaration instead")
- data_cons = tyConDataCons rep_tc
- con_whys = mapCatMaybes check_con data_cons
+ suggestion = ptext (sLit "Possible fix: use a standalone deriving declaration instead")
+ data_cons = tyConDataCons rep_tc
+ con_whys = getInvalids (map check_con data_cons)
- check_con :: DataCon -> Maybe SDoc
+ check_con :: DataCon -> Validity
check_con con
- | isVanillaDataCon con
- , all isTauTy (dataConOrigArgTys con) = Nothing
- | otherwise = Just (badCon con (ptext (sLit "must have a Haskell-98 type")))
+ | not (isVanillaDataCon con)
+ = NotValid (badCon con (ptext (sLit "has existentials or constraints in its type")))
+ | not (permissive || all isTauTy (dataConOrigArgTys con))
+ = NotValid (badCon con (ptext (sLit "has a higher-rank type")))
+ | otherwise
+ = IsValid
no_cons_why :: TyCon -> SDoc
no_cons_why rep_tc = quotes (pprSourceTyCon rep_tc) <+>
@@ -1241,12 +1357,12 @@ cond_enumOrProduct cls = cond_isEnumeration `orCond`
cond_args :: Class -> Condition
-- For some classes (eg Eq, Ord) we allow unlifted arg types
--- by generating specilaised code. For others (eg Data) we don't.
+-- by generating specialised code. For others (eg Data) we don't.
cond_args cls (_, tc, _)
= case bad_args of
- [] -> Nothing
- (ty:_) -> Just (hang (ptext (sLit "Don't know how to derive") <+> quotes (ppr cls))
- 2 (ptext (sLit "for type") <+> quotes (ppr ty)))
+ [] -> IsValid
+ (ty:_) -> NotValid (hang (ptext (sLit "Don't know how to derive") <+> quotes (ppr cls))
+ 2 (ptext (sLit "for type") <+> quotes (ppr ty)))
where
bad_args = [ arg_ty | con <- tyConDataCons tc
, arg_ty <- dataConOrigArgTys con
@@ -1266,8 +1382,8 @@ cond_args cls (_, tc, _)
cond_isEnumeration :: Condition
cond_isEnumeration (_, rep_tc, _)
- | isEnumerationTyCon rep_tc = Nothing
- | otherwise = Just why
+ | isEnumerationTyCon rep_tc = IsValid
+ | otherwise = NotValid why
where
why = sep [ quotes (pprSourceTyCon rep_tc) <+>
ptext (sLit "must be an enumeration type")
@@ -1276,8 +1392,8 @@ cond_isEnumeration (_, rep_tc, _)
cond_isProduct :: Condition
cond_isProduct (_, rep_tc, _)
- | isProductTyCon rep_tc = Nothing
- | otherwise = Just why
+ | isProductTyCon rep_tc = IsValid
+ | otherwise = NotValid why
where
why = quotes (pprSourceTyCon rep_tc) <+>
ptext (sLit "must have precisely one constructor")
@@ -1287,30 +1403,16 @@ cond_oldTypeableOK :: Condition
-- Currently: (a) args all of kind *
-- (b) 7 or fewer args
cond_oldTypeableOK (_, tc, _)
- | tyConArity tc > 7 = Just too_many
+ | tyConArity tc > 7 = NotValid too_many
| not (all (isSubOpenTypeKind . tyVarKind) (tyConTyVars tc))
- = Just bad_kind
- | otherwise = Nothing
+ = NotValid bad_kind
+ | otherwise = IsValid
where
too_many = quotes (pprSourceTyCon tc) <+>
ptext (sLit "must have 7 or fewer arguments")
bad_kind = quotes (pprSourceTyCon tc) <+>
ptext (sLit "must only have arguments of kind `*'")
-cond_TypeableOK :: Condition
--- Only not ok if it's a data instance
-cond_TypeableOK (_, tc, tc_args)
- | isDataFamilyTyCon tc && not (null tc_args)
- = Just no_families
-
- | otherwise
- = Nothing
- where
- no_families = sep [ ptext (sLit "Deriving Typeable is not allowed for family instances;")
- , ptext (sLit "derive Typeable for")
- <+> quotes (pprSourceTyCon tc)
- <+> ptext (sLit "alone") ]
-
functorLikeClassKeys :: [Unique]
functorLikeClassKeys = [functorClassKey, foldableClassKey, traversableClassKey]
@@ -1323,15 +1425,15 @@ cond_functorOK :: Bool -> Condition
-- (e) no "stupid context" on data type
cond_functorOK allowFunctions (_, rep_tc, _)
| null tc_tvs
- = Just (ptext (sLit "Data type") <+> quotes (ppr rep_tc)
- <+> ptext (sLit "must have some type parameters"))
+ = NotValid (ptext (sLit "Data type") <+> quotes (ppr rep_tc)
+ <+> ptext (sLit "must have some type parameters"))
| not (null bad_stupid_theta)
- = Just (ptext (sLit "Data type") <+> quotes (ppr rep_tc)
- <+> ptext (sLit "must not have a class context") <+> pprTheta bad_stupid_theta)
+ = NotValid (ptext (sLit "Data type") <+> quotes (ppr rep_tc)
+ <+> ptext (sLit "must not have a class context") <+> pprTheta bad_stupid_theta)
| otherwise
- = msum (map check_con data_cons) -- msum picks the first 'Just', if any
+ = allValid (map check_con data_cons)
where
tc_tvs = tyConTyVars rep_tc
Just (_, last_tv) = snocView tc_tvs
@@ -1339,41 +1441,36 @@ cond_functorOK allowFunctions (_, rep_tc, _)
is_bad pred = last_tv `elemVarSet` tyVarsOfType pred
data_cons = tyConDataCons rep_tc
- check_con con = msum (check_vanilla con : foldDataConArgs (ft_check con) con)
-
- check_vanilla :: DataCon -> Maybe SDoc
- check_vanilla con | isVanillaDataCon con = Nothing
- | otherwise = Just (badCon con existential)
-
- ft_check :: DataCon -> FFoldType (Maybe SDoc)
- ft_check con = FT { ft_triv = Nothing, ft_var = Nothing
- , ft_co_var = Just (badCon con covariant)
- , ft_fun = \x y -> if allowFunctions then x `mplus` y
- else Just (badCon con functions)
- , ft_tup = \_ xs -> msum xs
+ check_con con = allValid (check_universal con : foldDataConArgs (ft_check con) con)
+
+ check_universal :: DataCon -> Validity
+ check_universal con
+ | Just tv <- getTyVar_maybe (last (tyConAppArgs (dataConOrigResTy con)))
+ , tv `elem` dataConUnivTyVars con
+ , not (tv `elemVarSet` tyVarsOfTypes (dataConTheta con))
+ = IsValid -- See Note [Check that the type variable is truly universal]
+ | otherwise
+ = NotValid (badCon con existential)
+
+ ft_check :: DataCon -> FFoldType Validity
+ ft_check con = FT { ft_triv = IsValid, ft_var = IsValid
+ , ft_co_var = NotValid (badCon con covariant)
+ , ft_fun = \x y -> if allowFunctions then x `andValid` y
+ else NotValid (badCon con functions)
+ , ft_tup = \_ xs -> allValid xs
, ft_ty_app = \_ x -> x
- , ft_bad_app = Just (badCon con wrong_arg)
+ , ft_bad_app = NotValid (badCon con wrong_arg)
, ft_forall = \_ x -> x }
- existential = ptext (sLit "must not have existential arguments")
+ existential = ptext (sLit "must be truly polymorphic in the last argument of the data type")
covariant = ptext (sLit "must not use the type variable in a function argument")
functions = ptext (sLit "must not contain function types")
wrong_arg = ptext (sLit "must use the type variable only as the last argument of a data type")
-allDistinctTyVars :: [KindOrType] -> Bool
-allDistinctTyVars tkvs = go emptyVarSet tkvs
- where
- go _ [] = True
- go so_far (ty : tys)
- = case getTyVar_maybe ty of
- Nothing -> False
- Just tv | tv `elemVarSet` so_far -> False
- | otherwise -> go (so_far `extendVarSet` tv) tys
-
checkFlag :: ExtensionFlag -> Condition
checkFlag flag (dflags, _, _)
- | xopt flag dflags = Nothing
- | otherwise = Just why
+ | xopt flag dflags = IsValid
+ | otherwise = NotValid why
where
why = ptext (sLit "You need ") <> text flag_str
<+> ptext (sLit "to derive an instance for this class")
@@ -1417,6 +1514,28 @@ badCon :: DataCon -> SDoc -> SDoc
badCon con msg = ptext (sLit "Constructor") <+> quotes (ppr con) <+> msg
\end{code}
+Note [Check that the type variable is truly universal]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For Functor, Foldable, Traversable, we must check that the *last argument*
+of the type constructor is used truly universally quantified. Example
+
+ data T a b where
+ T1 :: a -> b -> T a b -- Fine! Vanilla H-98
+ T2 :: b -> c -> T a b -- Fine! Existential c, but we can still map over 'b'
+ T3 :: b -> T Int b -- Fine! Constraint 'a', but 'b' is still polymorphic
+ T4 :: Ord b => b -> T a b -- No! 'b' is constrained
+ T5 :: b -> T b b -- No! 'b' is constrained
+ T6 :: T a (b,b) -- No! 'b' is constrained
+
+Notice that only the first of these constructors is vanilla H-98. We only
+need to take care about the last argument (b in this case). See Trac #8678.
+Eg. for T1-T3 we can write
+
+ fmap f (T1 a b) = T1 a (f b)
+ fmap f (T2 b c) = T2 (f b) c
+ fmap f (T3 x) = T3 (f x)
+
+
Note [Superclasses of derived instance]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In general, a derived instance decl needs the superclasses of the derived
@@ -1449,14 +1568,15 @@ a context for the Data instances:
%************************************************************************
\begin{code}
-mkNewTypeEqn :: DynFlags -> [Var] -> Class
+mkNewTypeEqn :: DynFlags -> Maybe OverlapMode -> [Var] -> Class
-> [Type] -> TyCon -> [Type] -> TyCon -> [Type]
-> DerivContext
-> TcRn EarlyDerivSpec
-mkNewTypeEqn dflags tvs
+mkNewTypeEqn dflags overlap_mode tvs
cls cls_tys tycon tc_args rep_tycon rep_tc_args mtheta
-- Want: instance (...) => cls (cls_tys ++ [tycon tc_args]) where ...
- | might_derive_via_coercible && (newtype_deriving || std_class_via_coercible cls)
+ | ASSERT( length cls_tys + 1 == classArity cls )
+ might_derive_via_coercible && (newtype_deriving || std_class_via_coercible cls)
= do traceTc "newtype deriving:" (ppr tycon <+> ppr rep_tys <+> ppr all_preds)
dfun_name <- new_dfun_name cls tycon
loc <- getSrcSpanM
@@ -1467,6 +1587,7 @@ mkNewTypeEqn dflags tvs
, ds_cls = cls, ds_tys = inst_tys
, ds_tc = rep_tycon, ds_tc_args = rep_tc_args
, ds_theta = theta
+ , ds_overlap = overlap_mode
, ds_newtype = True }
Nothing -> return $ InferTheta $ DS
{ ds_loc = loc
@@ -1474,6 +1595,7 @@ mkNewTypeEqn dflags tvs
, ds_cls = cls, ds_tys = inst_tys
, ds_tc = rep_tycon, ds_tc_args = rep_tc_args
, ds_theta = all_preds
+ , ds_overlap = overlap_mode
, ds_newtype = True }
| otherwise
= case checkSideConditions dflags mtheta cls cls_tys rep_tycon rep_tc_args of
@@ -1487,7 +1609,7 @@ mkNewTypeEqn dflags tvs
| otherwise -> bale_out non_std
where
newtype_deriving = xopt Opt_GeneralizedNewtypeDeriving dflags
- go_for_it = mk_data_eqn tvs cls tycon tc_args rep_tycon rep_tc_args mtheta
+ go_for_it = mk_data_eqn overlap_mode tvs cls tycon tc_args rep_tycon rep_tc_args mtheta
bale_out msg = failWithTc (derivingThingErr newtype_deriving cls cls_tys inst_ty msg)
non_std = nonStdErr cls
@@ -1583,15 +1705,10 @@ mkNewTypeEqn dflags tvs
-- See Note [Determining whether newtype-deriving is appropriate]
might_derive_via_coercible
= not (non_coercible_class cls)
- && arity_ok
&& eta_ok
&& ats_ok
-- && not (isRecursiveTyCon tycon) -- Note [Recursive newtypes]
- arity_ok = length cls_tys + 1 == classArity cls
- -- Well kinded; eg not: newtype T ... deriving( ST )
- -- because ST needs *2* type params
-
-- Check that eta reduction is OK
eta_ok = nt_eta_arity <= length rep_tc_args
-- The newtype can be eta-reduced to match the number
@@ -1607,13 +1724,10 @@ mkNewTypeEqn dflags tvs
-- so for 'data' instance decls
cant_derive_err
- = vcat [ ppUnless arity_ok arity_msg
- , ppUnless eta_ok eta_msg
+ = vcat [ ppUnless eta_ok eta_msg
, ppUnless ats_ok ats_msg ]
- arity_msg = quotes (ppr (mkClassPred cls cls_tys)) <+> ptext (sLit "does not have arity 1")
eta_msg = ptext (sLit "cannot eta-reduce the representation type enough")
ats_msg = ptext (sLit "the class has associated types")
-
\end{code}
Note [Recursive newtypes]
@@ -1953,12 +2067,14 @@ the renamer. What a great hack!
genInst :: Bool -- True <=> standalone deriving
-> OverlapFlag
-> CommonAuxiliaries
- -> DerivSpec ThetaType -> TcM (InstInfo RdrName, BagDerivStuff, Maybe Name)
-genInst standalone_deriv oflag comauxs
+ -> DerivSpec ThetaType
+ -> TcM (InstInfo RdrName, BagDerivStuff, Maybe Name)
+genInst standalone_deriv default_oflag comauxs
spec@(DS { ds_tvs = tvs, ds_tc = rep_tycon, ds_tc_args = rep_tc_args
, ds_theta = theta, ds_newtype = is_newtype, ds_tys = tys
- , ds_name = name, ds_cls = clas, ds_loc = loc })
- | is_newtype
+ , ds_overlap = overlap_mode
+ , ds_name = dfun_name, ds_cls = clas, ds_loc = loc })
+ | is_newtype -- See Note [Bindings for Generalised Newtype Deriving]
= do { inst_spec <- mkInstance oflag theta spec
; traceTc "genInst/is_newtype" (vcat [ppr loc, ppr clas, ppr tvs, ppr tys, ppr rhs_ty])
; return ( InstInfo
@@ -1966,70 +2082,69 @@ genInst standalone_deriv oflag comauxs
, iBinds = InstBindings
{ ib_binds = gen_Newtype_binds loc clas tvs tys rhs_ty
, ib_pragmas = []
+ , ib_extensions = [ Opt_ImpredicativeTypes
+ , Opt_RankNTypes ]
, ib_standalone_deriving = standalone_deriv } }
, emptyBag
, Just $ getName $ head $ tyConDataCons rep_tycon ) }
-- See Note [Newtype deriving and unused constructors]
| otherwise
- = do { fix_env <- getFixityEnv
- ; (meth_binds, deriv_stuff) <- genDerivStuff (getSrcSpan name)
- fix_env clas name rep_tycon
+ = do { (meth_binds, deriv_stuff) <- genDerivStuff loc clas
+ dfun_name rep_tycon
(lookup rep_tycon comauxs)
; inst_spec <- mkInstance oflag theta spec
; let inst_info = InstInfo { iSpec = inst_spec
, iBinds = InstBindings
{ ib_binds = meth_binds
, ib_pragmas = []
+ , ib_extensions = []
, ib_standalone_deriving = standalone_deriv } }
; return ( inst_info, deriv_stuff, Nothing ) }
where
+ oflag = setOverlapModeMaybe default_oflag overlap_mode
rhs_ty = newTyConInstRhs rep_tycon rep_tc_args
-genDerivStuff :: SrcSpan -> FixityEnv -> Class -> Name -> TyCon
+genDerivStuff :: SrcSpan -> Class -> Name -> TyCon
-> Maybe CommonAuxiliary
-> TcM (LHsBinds RdrName, BagDerivStuff)
-genDerivStuff loc fix_env clas name tycon comaux_maybe
- | className clas `elem` oldTypeableClassNames
- = do dflags <- getDynFlags
- return (gen_old_Typeable_binds dflags loc tycon, emptyBag)
-
- | className clas == typeableClassName
- = do dflags <- getDynFlags
- return (gen_Typeable_binds dflags loc tycon, emptyBag)
-
- | ck `elem` [genClassKey, gen1ClassKey] -- Special case because monadic
- = let gk = if ck == genClassKey then Gen0 else Gen1 -- TODO NSF: correctly identify when we're building Both instead of One
+genDerivStuff loc clas dfun_name tycon comaux_maybe
+ | let ck = classKey clas
+ , ck `elem` [genClassKey, gen1ClassKey] -- Special case because monadic
+ = let gk = if ck == genClassKey then Gen0 else Gen1
+ -- TODO NSF: correctly identify when we're building Both instead of One
Just metaTyCons = comaux_maybe -- well-guarded by commonAuxiliaries and genInst
in do
- (binds, faminst) <- gen_Generic_binds gk tycon metaTyCons (nameModule name)
+ (binds, faminst) <- gen_Generic_binds gk tycon metaTyCons (nameModule dfun_name)
return (binds, DerivFamInst faminst `consBag` emptyBag)
| otherwise -- Non-monadic generators
= do dflags <- getDynFlags
- case assocMaybe (gen_list dflags) (getUnique clas) of
- Just gen_fn -> return (gen_fn loc tycon)
- Nothing -> pprPanic "genDerivStuff: bad derived class" (ppr clas)
- where
- ck = classKey clas
-
- gen_list :: DynFlags
- -> [(Unique, SrcSpan -> TyCon -> (LHsBinds RdrName, BagDerivStuff))]
- gen_list dflags
- = [(eqClassKey, gen_Eq_binds)
- ,(ordClassKey, gen_Ord_binds)
- ,(enumClassKey, gen_Enum_binds)
- ,(boundedClassKey, gen_Bounded_binds)
- ,(ixClassKey, gen_Ix_binds)
- ,(showClassKey, gen_Show_binds fix_env)
- ,(readClassKey, gen_Read_binds fix_env)
- ,(dataClassKey, gen_Data_binds dflags)
- ,(functorClassKey, gen_Functor_binds)
- ,(foldableClassKey, gen_Foldable_binds)
- ,(traversableClassKey, gen_Traversable_binds)
- ]
+ fix_env <- getFixityEnv
+ return (genDerivedBinds dflags fix_env clas loc tycon)
\end{code}
+Note [Bindings for Generalised Newtype Deriving]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Consider
+ class Eq a => C a where
+ f :: a -> a
+ newtype N a = MkN [a] deriving( C )
+ instance Eq (N a) where ...
+
+The 'deriving C' clause generates, in effect
+ instance (C [a], Eq a) => C (N a) where
+ f = coerce (f :: [a] -> [a])
+
+This generates a cast for each method, but allows the superclasses to
+be worked out in the usual way. In this case the superclass (Eq (N
+a)) will be solved by the explicit Eq (N a) instance. We do *not*
+create the superclasses by casting the superclass dictionaries for the
+representation type.
+
+See the paper "Safe zero-cost coercions for Haskell".
+
+
%************************************************************************
%* *
\subsection[TcDeriv-taggery-Names]{What con2tag/tag2con functions are available?}
diff --git a/compiler/typecheck/TcEnv.lhs b/compiler/typecheck/TcEnv.lhs
index a2df33814071..f4c7c100634f 100644
--- a/compiler/typecheck/TcEnv.lhs
+++ b/compiler/typecheck/TcEnv.lhs
@@ -3,7 +3,9 @@
%
\begin{code}
+{-# LANGUAGE CPP, FlexibleInstances #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
+
module TcEnv(
TyThing(..), TcTyThing(..), TcId,
@@ -16,8 +18,8 @@ module TcEnv(
tcExtendGlobalEnv, tcExtendGlobalEnvImplicit, setGlobalTypeEnv,
tcExtendGlobalValEnv,
tcLookupLocatedGlobal, tcLookupGlobal,
- tcLookupField, tcLookupTyCon, tcLookupClass, tcLookupDataCon,
- tcLookupConLike,
+ tcLookupField, tcLookupTyCon, tcLookupClass,
+ tcLookupDataCon, tcLookupPatSyn, tcLookupConLike,
tcLookupLocatedGlobalId, tcLookupLocatedTyCon,
tcLookupLocatedClass, tcLookupInstance, tcLookupAxiom,
@@ -66,11 +68,13 @@ import TcIface
import PrelNames
import TysWiredIn
import Id
+import IdInfo( IdDetails(VanillaId) )
import Var
import VarSet
import RdrName
import InstEnv
-import DataCon
+import DataCon ( DataCon )
+import PatSyn ( PatSyn )
import ConLike
import TyCon
import CoAxiom
@@ -157,6 +161,13 @@ tcLookupDataCon name = do
AConLike (RealDataCon con) -> return con
_ -> wrongThingErr "data constructor" (AGlobal thing) name
+tcLookupPatSyn :: Name -> TcM PatSyn
+tcLookupPatSyn name = do
+ thing <- tcLookupGlobal name
+ case thing of
+ AConLike (PatSynCon ps) -> return ps
+ _ -> wrongThingErr "pattern synonym" (AGlobal thing) name
+
tcLookupConLike :: Name -> TcM ConLike
tcLookupConLike name = do
thing <- tcLookupGlobal name
@@ -715,6 +726,10 @@ data InstBindings a
{ ib_binds :: (LHsBinds a) -- Bindings for the instance methods
, ib_pragmas :: [LSig a] -- User pragmas recorded for generating
-- specialised instances
+ , ib_extensions :: [ExtensionFlag] -- any extra extensions that should
+ -- be enabled when type-checking this
+ -- instance; needed for
+ -- GeneralizedNewtypeDeriving
, ib_standalone_deriving :: Bool
-- True <=> This code came from a standalone deriving clause
@@ -797,7 +812,7 @@ mkStableIdFromString str sig_ty loc occ_wrapper = do
name <- mkWrapperName "stable" str
let occ = mkVarOccFS name :: OccName
gnm = mkExternalName uniq mod (occ_wrapper occ) loc :: Name
- id = mkExportedLocalId gnm sig_ty :: Id
+ id = mkExportedLocalId VanillaId gnm sig_ty :: Id
return id
mkStableIdFromName :: Name -> Type -> SrcSpan -> (OccName -> OccName) -> TcM TcId
@@ -812,7 +827,7 @@ mkWrapperName what nameBase
thisMod <- getModule
let -- Note [Generating fresh names for ccall wrapper]
wrapperRef = nextWrapperNum dflags
- pkg = packageIdString (modulePackageId thisMod)
+ pkg = packageKeyString (modulePackageKey thisMod)
mod = moduleNameString (moduleName thisMod)
wrapperNum <- liftIO $ atomicModifyIORef wrapperRef $ \mod_env ->
let num = lookupWithDefaultModuleEnv mod_env 0 thisMod
@@ -860,13 +875,16 @@ notFound name
ptext (sLit "is not in scope during type checking, but it passed the renamer"),
ptext (sLit "tcl_env of environment:") <+> ppr (tcl_env lcl_env)]
-- Take case: printing the whole gbl env can
- -- cause an infnite loop, in the case where we
+ -- cause an infinite loop, in the case where we
-- are in the middle of a recursive TyCon/Class group;
-- so let's just not print it! Getting a loop here is
-- very unhelpful, because it hides one compiler bug with another
}
wrongThingErr :: String -> TcTyThing -> Name -> TcM a
+-- It's important that this only calls pprTcTyThingCategory, which in
+-- turn does not look at the details of the TcTyThing.
+-- See Note [Placeholder PatSyn kinds] in TcBinds
wrongThingErr expected thing name
= failWithTc (pprTcTyThingCategory thing <+> quotes (ppr name) <+>
ptext (sLit "used as a") <+> text expected)
diff --git a/compiler/typecheck/TcErrors.lhs b/compiler/typecheck/TcErrors.lhs
index f105cdddfffd..c8f3d069972b 100644
--- a/compiler/typecheck/TcErrors.lhs
+++ b/compiler/typecheck/TcErrors.lhs
@@ -1,6 +1,6 @@
\begin{code}
-{-# LANGUAGE ScopedTypeVariables #-}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -38,7 +38,6 @@ import Var
import VarSet
import VarEnv
import Bag
-import Maybes
import ErrUtils ( ErrMsg, makeIntoWarning, pprLocErrMsg )
import BasicTypes
import Util
@@ -47,7 +46,9 @@ import Outputable
import SrcLoc
import DynFlags
import ListSetOps ( equivClasses )
-import Data.List ( partition, mapAccumL, zip4 )
+
+import Data.Maybe
+import Data.List ( partition, mapAccumL, zip4, nub )
\end{code}
%************************************************************************
@@ -667,10 +668,11 @@ mkTyVarEqErr :: DynFlags -> ReportErrCtxt -> SDoc -> Ct
-- tv1 and ty2 are already tidied
mkTyVarEqErr dflags ctxt extra ct oriented tv1 ty2
| isUserSkolem ctxt tv1 -- ty2 won't be a meta-tyvar, or else the thing would
- -- be oriented the other way round; see TcCanonical.reOrient
+ -- be oriented the other way round;
+ -- see TcCanonical.canEqTyVarTyVar
|| isSigTyVar tv1 && not (isTyVarTy ty2)
= mkErrorMsg ctxt ct (vcat [ misMatchOrCND ctxt ct oriented ty1 ty2
- , extraTyVarInfo ctxt ty1 ty2
+ , extraTyVarInfo ctxt tv1 ty2
, extra ])
-- So tv is a meta tyvar (or started that way before we
@@ -700,7 +702,7 @@ mkTyVarEqErr dflags ctxt extra ct oriented tv1 ty2
, Implic { ic_skols = skols } <- implic
, tv1 `elem` skols
= mkErrorMsg ctxt ct (vcat [ misMatchMsg oriented ty1 ty2
- , extraTyVarInfo ctxt ty1 ty2
+ , extraTyVarInfo ctxt tv1 ty2
, extra ])
-- Check for skolem escape
@@ -727,14 +729,15 @@ mkTyVarEqErr dflags ctxt extra ct oriented tv1 ty2
| (implic:_) <- cec_encl ctxt -- Get the innermost context
, Implic { ic_env = env, ic_given = given, ic_info = skol_info } <- implic
= do { let msg = misMatchMsg oriented ty1 ty2
- untch_extra
+ untch_extra
= nest 2 $
sep [ quotes (ppr tv1) <+> ptext (sLit "is untouchable")
, nest 2 $ ptext (sLit "inside the constraints") <+> pprEvVarTheta given
, nest 2 $ ptext (sLit "bound by") <+> ppr skol_info
, nest 2 $ ptext (sLit "at") <+> ppr (tcl_loc env) ]
- tv_extra = extraTyVarInfo ctxt ty1 ty2
- ; mkErrorMsg ctxt ct (vcat [msg, untch_extra, tv_extra, extra]) }
+ tv_extra = extraTyVarInfo ctxt tv1 ty2
+ add_sig = suggestAddSig ctxt ty1 ty2
+ ; mkErrorMsg ctxt ct (vcat [msg, untch_extra, tv_extra, add_sig, extra]) }
| otherwise
= reportEqErr ctxt extra ct oriented (mkTyVarTy tv1) ty2
@@ -791,7 +794,7 @@ misMatchOrCND ctxt ct oriented ty1 ty2
-- or there is no context, don't report the context
= misMatchMsg oriented ty1 ty2
| otherwise
- = couldNotDeduce givens ([mkEqPred ty1 ty2], orig)
+ = couldNotDeduce givens ([mkTcEqPred ty1 ty2], orig)
where
givens = getUserGivens ctxt
orig = TypeEqOrigin { uo_actual = ty1, uo_expected = ty2 }
@@ -813,30 +816,46 @@ pp_givens givens
2 (sep [ ptext (sLit "bound by") <+> ppr skol_info
, ptext (sLit "at") <+> ppr loc])
-extraTyVarInfo :: ReportErrCtxt -> TcType -> TcType -> SDoc
--- Add on extra info about the types themselves
+extraTyVarInfo :: ReportErrCtxt -> TcTyVar -> TcType -> SDoc
+-- Add on extra info about skolem constants
-- NB: The types themselves are already tidied
-extraTyVarInfo ctxt ty1 ty2
- = nest 2 (extra1 $$ extra2)
+extraTyVarInfo ctxt tv1 ty2
+ = nest 2 (tv_extra tv1 $$ ty_extra ty2)
where
- extra1 = tyVarExtraInfoMsg (cec_encl ctxt) ty1
- extra2 = tyVarExtraInfoMsg (cec_encl ctxt) ty2
-
-tyVarExtraInfoMsg :: [Implication] -> Type -> SDoc
--- Shows a bit of extra info about skolem constants
-tyVarExtraInfoMsg implics ty
- | Just tv <- tcGetTyVar_maybe ty
- , isTcTyVar tv, isSkolemTyVar tv
- , let pp_tv = quotes (ppr tv)
- = case tcTyVarDetails tv of
- SkolemTv {} -> pp_tv <+> pprSkol (getSkolemInfo implics tv) (getSrcLoc tv)
- FlatSkol {} -> pp_tv <+> ptext (sLit "is a flattening type variable")
- RuntimeUnk {} -> pp_tv <+> ptext (sLit "is an interactive-debugger skolem")
- MetaTv {} -> empty
-
- | otherwise -- Normal case
- = empty
-
+ implics = cec_encl ctxt
+ ty_extra ty = case tcGetTyVar_maybe ty of
+ Just tv -> tv_extra tv
+ Nothing -> empty
+
+ tv_extra tv | isTcTyVar tv, isSkolemTyVar tv
+ , let pp_tv = quotes (ppr tv)
+ = case tcTyVarDetails tv of
+ SkolemTv {} -> pp_tv <+> pprSkol (getSkolemInfo implics tv) (getSrcLoc tv)
+ FlatSkol {} -> pp_tv <+> ptext (sLit "is a flattening type variable")
+ RuntimeUnk {} -> pp_tv <+> ptext (sLit "is an interactive-debugger skolem")
+ MetaTv {} -> empty
+
+ | otherwise -- Normal case
+ = empty
+
+suggestAddSig :: ReportErrCtxt -> TcType -> TcType -> SDoc
+-- See Note [Suggest adding a type signature]
+suggestAddSig ctxt ty1 ty2
+ | null inferred_bndrs
+ = empty
+ | [bndr] <- inferred_bndrs
+ = ptext (sLit "Possible fix: add a type signature for") <+> quotes (ppr bndr)
+ | otherwise
+ = ptext (sLit "Possible fix: add type signatures for some or all of") <+> (ppr inferred_bndrs)
+ where
+ inferred_bndrs = nub (get_inf ty1 ++ get_inf ty2)
+ get_inf ty | Just tv <- tcGetTyVar_maybe ty
+ , isTcTyVar tv, isSkolemTyVar tv
+ , InferSkol prs <- getSkolemInfo (cec_encl ctxt) tv
+ = map fst prs
+ | otherwise
+ = []
+
kindErrorMsg :: TcType -> TcType -> SDoc -- Types are already tidy
kindErrorMsg ty1 ty2
= vcat [ ptext (sLit "Kind incompatibility when matching types:")
@@ -884,7 +903,7 @@ sameOccExtra ty1 ty2
, let n1 = tyConName tc1
n2 = tyConName tc2
same_occ = nameOccName n1 == nameOccName n2
- same_pkg = modulePackageId (nameModule n1) == modulePackageId (nameModule n2)
+ same_pkg = modulePackageKey (nameModule n1) == modulePackageKey (nameModule n2)
, n1 /= n2 -- Different Names
, same_occ -- but same OccName
= ptext (sLit "NB:") <+> (ppr_from same_pkg n1 $$ ppr_from same_pkg n2)
@@ -898,14 +917,31 @@ sameOccExtra ty1 ty2
| otherwise -- Imported things have an UnhelpfulSrcSpan
= hang (quotes (ppr nm))
2 (sep [ ptext (sLit "is defined in") <+> quotes (ppr (moduleName mod))
- , ppUnless (same_pkg || pkg == mainPackageId) $
+ , ppUnless (same_pkg || pkg == mainPackageKey) $
nest 4 $ ptext (sLit "in package") <+> quotes (ppr pkg) ])
where
- pkg = modulePackageId mod
+ pkg = modulePackageKey mod
mod = nameModule nm
loc = nameSrcSpan nm
\end{code}
+Note [Suggest adding a type signature]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The OutsideIn algorithm rejects GADT programs that don't have a principal
+type, and indeed some that do. Example:
+ data T a where
+ MkT :: Int -> T Int
+
+ f (MkT n) = n
+
+Does this have type f :: T a -> a, or f :: T a -> Int?
+The error that shows up tends to be an attempt to unify an
+untouchable type variable. So suggestAddSig sees if the offending
+type variable is bound by an *inferred* signature, and suggests
+adding a declared signature instead.
+
+This initially came up in Trac #8968, concerning pattern synonyms.
+
Note [Disambiguating (X ~ X) errors]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See Trac #8278
@@ -995,9 +1031,8 @@ mk_dict_err ctxt (ct, (matches, unifiers, safe_haskell))
= do { let (is_ambig, ambig_msg) = mkAmbigMsg ct
; (ctxt, binds_msg) <- relevantBindings True ctxt ct
; traceTc "mk_dict_err" (ppr ct $$ ppr is_ambig $$ ambig_msg)
- ; safe_mod <- safeLanguageOn `fmap` getDynFlags
; rdr_env <- getGlobalRdrEnv
- ; return (ctxt, cannot_resolve_msg safe_mod rdr_env is_ambig binds_msg ambig_msg) }
+ ; return (ctxt, cannot_resolve_msg rdr_env is_ambig binds_msg ambig_msg) }
| not safe_haskell -- Some matches => overlap errors
= return (ctxt, overlap_msg)
@@ -1012,8 +1047,8 @@ mk_dict_err ctxt (ct, (matches, unifiers, safe_haskell))
givens = getUserGivens ctxt
all_tyvars = all isTyVarTy tys
- cannot_resolve_msg safe_mod rdr_env has_ambig_tvs binds_msg ambig_msg
- = vcat [ addArising orig (no_inst_msg $$ coercible_explanation safe_mod rdr_env)
+ cannot_resolve_msg rdr_env has_ambig_tvs binds_msg ambig_msg
+ = vcat [ addArising orig (no_inst_msg $$ coercible_explanation rdr_env)
, vcat (pp_givens givens)
, ppWhen (has_ambig_tvs && not (null unifiers && null givens))
(vcat [ ambig_msg, binds_msg, potential_msg ])
@@ -1033,7 +1068,7 @@ mk_dict_err ctxt (ct, (matches, unifiers, safe_haskell))
add_to_ctxt_fixes has_ambig_tvs
| not has_ambig_tvs && all_tyvars
- , (orig:origs) <- mapCatMaybes get_good_orig (cec_encl ctxt)
+ , (orig:origs) <- mapMaybe get_good_orig (cec_encl ctxt)
= [sep [ ptext (sLit "add") <+> pprParendType pred
<+> ptext (sLit "to the context of")
, nest 2 $ ppr_skol orig $$
@@ -1052,8 +1087,9 @@ mk_dict_err ctxt (ct, (matches, unifiers, safe_haskell))
no_inst_msg
| clas == coercibleClass
= let (ty1, ty2) = getEqPredTys pred
- in ptext (sLit "Could not coerce from") <+> quotes (ppr ty1) <+>
- ptext (sLit "to") <+> quotes (ppr ty2)
+ in sep [ ptext (sLit "Could not coerce from") <+> quotes (ppr ty1)
+ , nest 19 (ptext (sLit "to") <+> quotes (ppr ty2)) ]
+ -- The nesting makes the types line up
| null givens && null matches
= ptext (sLit "No instance for") <+> pprParendType pred
| otherwise
@@ -1102,7 +1138,7 @@ mk_dict_err ctxt (ct, (matches, unifiers, safe_haskell))
ispecs = [ispec | (ispec, _) <- matches]
givens = getUserGivens ctxt
- matching_givens = mapCatMaybes matchable givens
+ matching_givens = mapMaybe matchable givens
matchable (evvars,skol_info,loc)
= case ev_vars_matching of
@@ -1138,27 +1174,12 @@ mk_dict_err ctxt (ct, (matches, unifiers, safe_haskell))
-- This function tries to reconstruct why a "Coercible ty1 ty2" constraint
-- is left over. Therefore its logic has to stay in sync with
-- getCoericbleInst in TcInteract. See Note [Coercible Instances]
- coercible_explanation safe_mod rdr_env
+ coercible_explanation rdr_env
| clas /= coercibleClass = empty
| Just (tc1,tyArgs1) <- splitTyConApp_maybe ty1,
Just (tc2,tyArgs2) <- splitTyConApp_maybe ty2,
tc1 == tc2
= nest 2 $ vcat $
- -- Only for safe haskell: First complain if tc is abstract, only if
- -- not check if the type constructors therein are abstract
- (if safe_mod
- then case tyConAbstractMsg rdr_env tc1 empty of
- Just msg ->
- [ msg $$ ptext (sLit "as required in SafeHaskell mode") ]
- Nothing ->
- [ msg
- | tc <- tyConsOfTyCon tc1
- , Just msg <- return $
- tyConAbstractMsg rdr_env tc $
- parens $ ptext (sLit "used within") <+> quotes (ppr tc1)
- ]
- else []
- ) ++
[ fsep [ hsep [ ptext $ sLit "because the", speakNth n, ptext $ sLit "type argument"]
, hsep [ ptext $ sLit "of", quotes (ppr tc1), ptext $ sLit "has role Nominal,"]
, ptext $ sLit "but the arguments"
@@ -1176,9 +1197,9 @@ mk_dict_err ctxt (ct, (matches, unifiers, safe_haskell))
Just msg <- coercible_msg_for_tycon rdr_env tc
= msg
| otherwise
- = nest 2 $ hsep [ ptext $ sLit "because", quotes (ppr ty1),
- ptext $ sLit "and", quotes (ppr ty2),
- ptext $ sLit "are different types." ]
+ = nest 2 $ sep [ ptext (sLit "because") <+> quotes (ppr ty1)
+ , nest 4 (vcat [ ptext (sLit "and") <+> quotes (ppr ty2)
+ , ptext (sLit "are different types.") ]) ]
where
(ty1, ty2) = getEqPredTys pred
@@ -1268,29 +1289,51 @@ flattening any further. After all, there can be no instance declarations
that match such things. And flattening under a for-all is problematic
anyway; consider C (forall a. F a)
+Note [Suggest -fprint-explicit-kinds]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+It can be terribly confusing to get an error message like (Trac #9171)
+ Couldn't match expected type ‘GetParam Base (GetParam Base Int)’
+ with actual type ‘GetParam Base (GetParam Base Int)’
+The reason may be that the kinds don't match up. Typically you'll get
+more useful information, but not when it's as a result of ambiguity.
+This test suggests -fprint-explicit-kinds when all the ambiguous type
+variables are kind variables.
+
\begin{code}
mkAmbigMsg :: Ct -> (Bool, SDoc)
mkAmbigMsg ct
- | isEmptyVarSet ambig_tv_set = (False, empty)
- | otherwise = (True, msg)
+ | null ambig_tkvs = (False, empty)
+ | otherwise = (True, msg)
where
- ambig_tv_set = filterVarSet isAmbiguousTyVar (tyVarsOfCt ct)
- ambig_tvs = varSetElems ambig_tv_set
-
- is_or_are | isSingleton ambig_tvs = text "is"
- | otherwise = text "are"
-
- msg | any isRuntimeUnkSkol ambig_tvs -- See Note [Runtime skolems]
+ ambig_tkv_set = filterVarSet isAmbiguousTyVar (tyVarsOfCt ct)
+ ambig_tkvs = varSetElems ambig_tkv_set
+ (ambig_kvs, ambig_tvs) = partition isKindVar ambig_tkvs
+
+ msg | any isRuntimeUnkSkol ambig_tkvs -- See Note [Runtime skolems]
= vcat [ ptext (sLit "Cannot resolve unknown runtime type") <> plural ambig_tvs
<+> pprQuotedList ambig_tvs
, ptext (sLit "Use :print or :force to determine these types")]
- | otherwise
- = vcat [ text "The type variable" <> plural ambig_tvs
- <+> pprQuotedList ambig_tvs
- <+> is_or_are <+> text "ambiguous" ]
+
+ | not (null ambig_tvs)
+ = pp_ambig (ptext (sLit "type")) ambig_tvs
+
+ | otherwise -- All ambiguous kind variables; suggest -fprint-explicit-kinds
+ = vcat [ pp_ambig (ptext (sLit "kind")) ambig_kvs
+ , sdocWithDynFlags suggest_explicit_kinds ]
+
+ pp_ambig what tkvs
+ = ptext (sLit "The") <+> what <+> ptext (sLit "variable") <> plural tkvs
+ <+> pprQuotedList tkvs <+> is_or_are tkvs <+> ptext (sLit "ambiguous")
+
+ is_or_are [_] = text "is"
+ is_or_are _ = text "are"
+
+ suggest_explicit_kinds dflags -- See Note [Suggest -fprint-explicit-kinds]
+ | gopt Opt_PrintExplicitKinds dflags = empty
+ | otherwise = ptext (sLit "Use -fprint-explicit-kinds to see the kind arguments")
pprSkol :: SkolemInfo -> SrcLoc -> SDoc
-pprSkol UnkSkol _
+pprSkol UnkSkol _
= ptext (sLit "is an unknown type variable")
pprSkol skol_info tv_loc
= sep [ ptext (sLit "is a rigid type variable bound by"),
diff --git a/compiler/typecheck/TcEvidence.lhs b/compiler/typecheck/TcEvidence.lhs
index 3471b327fa73..7fc6194b8fe0 100644
--- a/compiler/typecheck/TcEvidence.lhs
+++ b/compiler/typecheck/TcEvidence.lhs
@@ -3,6 +3,8 @@
%
\begin{code}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
+
module TcEvidence (
-- HsWrapper
@@ -25,7 +27,7 @@ module TcEvidence (
mkTcSymCo, mkTcTransCo, mkTcNthCo, mkTcLRCo, mkTcSubCo,
mkTcAxiomRuleCo,
tcCoercionKind, coVarsOfTcCo, isEqVar, mkTcCoVarCo,
- isTcReflCo, isTcReflCo_maybe, getTcCoVar_maybe,
+ isTcReflCo, getTcCoVar_maybe,
tcCoercionRole, eqVarRole,
coercionToTcCoercion
) where
@@ -82,7 +84,7 @@ differences
* The kind of a TcCoercion is t1 ~ t2 (resp. Coercible t1 t2)
of a Coercion is t1 ~# t2 (resp. t1 ~#R t2)
- * UnsafeCo aren't required, but we do have TcPhandomCo
+ * UnsafeCo aren't required, but we do have TcPhantomCo
* Representation invariants are weaker:
- we are allowed to have type synonyms in TcTyConAppCo
@@ -351,7 +353,7 @@ pprTcCo, pprParendTcCo :: TcCoercion -> SDoc
pprTcCo co = ppr_co TopPrec co
pprParendTcCo co = ppr_co TyConPrec co
-ppr_co :: Prec -> TcCoercion -> SDoc
+ppr_co :: TyPrec -> TcCoercion -> SDoc
ppr_co _ (TcRefl r ty) = angleBrackets (ppr ty) <> ppr_role r
ppr_co p co@(TcTyConAppCo _ tc [_,_])
@@ -404,7 +406,7 @@ ppr_role r = underscore <> pp_role
Representational -> char 'R'
Phantom -> char 'P'
-ppr_fun_co :: Prec -> TcCoercion -> SDoc
+ppr_fun_co :: TyPrec -> TcCoercion -> SDoc
ppr_fun_co p co = pprArrowChain p (split co)
where
split :: TcCoercion -> [SDoc]
@@ -413,7 +415,7 @@ ppr_fun_co p co = pprArrowChain p (split co)
= ppr_co FunPrec arg : split res
split co = [ppr_co TopPrec co]
-ppr_forall_co :: Prec -> TcCoercion -> SDoc
+ppr_forall_co :: TyPrec -> TcCoercion -> SDoc
ppr_forall_co p ty
= maybeParen p FunPrec $
sep [pprForAll tvs, ppr_co TopPrec rho]
@@ -594,7 +596,7 @@ data EvTerm
-- dictionaries, even though the former have no
-- selector Id. We count up from _0_
- | EvLit EvLit -- Dictionary for KnownNat and KnownLit classes.
+ | EvLit EvLit -- Dictionary for KnownNat and KnownSymbol classes.
-- Note [KnownNat & KnownSymbol and EvLit]
deriving( Data.Data, Data.Typeable)
@@ -651,7 +653,7 @@ Conclusion: a new wanted coercion variable should be made mutable.
Note [KnownNat & KnownSymbol and EvLit]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A part of the type-level literals implementation are the classes
-"KnownNat" and "KnownLit", which provide a "smart" constructor for
+"KnownNat" and "KnownSymbol", which provide a "smart" constructor for
defining singleton values. Here is the key stuff from GHC.TypeLits
class KnownNat (n :: Nat) where
@@ -692,7 +694,7 @@ especialy when the `KnowNat` evidence is packaged up in an existential.
The story for kind `Symbol` is analogous:
* class KnownSymbol
- * newypte SSymbol
+ * newtype SSymbol
* Evidence: EvLit (EvStr n)
diff --git a/compiler/typecheck/TcExpr.lhs b/compiler/typecheck/TcExpr.lhs
index 409a23047114..7e6c495506d9 100644
--- a/compiler/typecheck/TcExpr.lhs
+++ b/compiler/typecheck/TcExpr.lhs
@@ -5,6 +5,8 @@ c%
\section[TcExpr]{Typecheck an expression}
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcExpr ( tcPolyExpr, tcPolyExprNC, tcMonoExpr, tcMonoExprNC,
tcInferRho, tcInferRhoNC,
tcSyntaxOp, tcCheckId,
@@ -74,7 +76,7 @@ import qualified Data.Set as Set
\begin{code}
tcPolyExpr, tcPolyExprNC
:: LHsExpr Name -- Expression to type check
- -> TcSigmaType -- Expected type (could be a polytpye)
+ -> TcSigmaType -- Expected type (could be a polytype)
-> TcM (LHsExpr TcId) -- Generalised expr with expected type
-- tcPolyExpr is a convenient place (frequent but not too frequent)
@@ -200,7 +202,7 @@ tcExpr (HsIPVar x) res_ty
; ip_var <- emitWanted origin (mkClassPred ipClass [ip_name, ip_ty])
; tcWrapResult (fromDict ipClass ip_name ip_ty (HsVar ip_var)) ip_ty res_ty }
where
- -- Coerces a dictionry for `IP "x" t` into `t`.
+ -- Coerces a dictionary for `IP "x" t` into `t`.
fromDict ipClass x ty =
case unwrapNewTyCon_maybe (classTyCon ipClass) of
Just (_,_,ax) -> HsWrap $ mkWpCast $ mkTcUnbranchedAxInstCo Representational ax [x,ty]
@@ -221,11 +223,14 @@ tcExpr e@(HsLamCase _ matches) res_ty
tcExpr (ExprWithTySig expr sig_ty) res_ty
= do { sig_tc_ty <- tcHsSigType ExprSigCtxt sig_ty
- -- Remember to extend the lexical type-variable environment
; (gen_fn, expr')
<- tcGen ExprSigCtxt sig_tc_ty $ \ skol_tvs res_ty ->
- tcExtendTyVarEnv2 (hsExplicitTvs sig_ty `zip` skol_tvs) $
- -- See Note [More instantiated than scoped] in TcBinds
+
+ -- Remember to extend the lexical type-variable environment
+ -- See Note [More instantiated than scoped] in TcBinds
+ tcExtendTyVarEnv2
+ [(n,tv) | (Just n, tv) <- findScopedTyVars sig_ty sig_tc_ty skol_tvs] $
+
tcMonoExprNC expr res_ty
; let inner_expr = ExprWithTySigOut (mkLHsWrap gen_fn expr') sig_ty
@@ -318,24 +323,25 @@ tcExpr (OpApp arg1 op fix arg2) res_ty
-- arg1_ty = arg2_ty -> op_res_ty
-- And arg2_ty maybe polymorphic; that's the point
- -- Make sure that the argument and result types have kind '*'
+ -- Make sure that the argument type has kind '*'
-- Eg we do not want to allow (D# $ 4.0#) Trac #5570
-- (which gives a seg fault)
-- We do this by unifying with a MetaTv; but of course
-- it must allow foralls in the type it unifies with (hence PolyTv)!
+ --
+ -- The result type can have any kind (Trac #8739),
+ -- so we can just use res_ty
- -- ($) :: forall ab. (a->b) -> a -> b
+ -- ($) :: forall (a:*) (b:Open). (a->b) -> a -> b
; a_ty <- newPolyFlexiTyVarTy
- ; b_ty <- newPolyFlexiTyVarTy
; arg2' <- tcArg op (arg2, arg2_ty, 2)
- ; co_res <- unifyType b_ty res_ty -- b ~ res
- ; co_a <- unifyType arg2_ty a_ty -- arg2 ~ a
- ; co_b <- unifyType op_res_ty b_ty -- op_res ~ b
+ ; co_a <- unifyType arg2_ty a_ty -- arg2 ~ a
+ ; co_b <- unifyType op_res_ty res_ty -- op_res ~ res
; op_id <- tcLookupId op_name
- ; let op' = L loc (HsWrap (mkWpTyApps [a_ty, b_ty]) (HsVar op_id))
- ; return $ mkHsWrapCo (co_res) $
+ ; let op' = L loc (HsWrap (mkWpTyApps [a_ty, res_ty]) (HsVar op_id))
+ ; return $
OpApp (mkLHsWrapCo (mkTcFunCo Nominal co_a co_b) $
mkLHsWrapCo co_arg1 arg1')
op' fix
@@ -494,7 +500,8 @@ for conditionals:
to support expressions like this:
ifThenElse :: Maybe a -> (a -> b) -> b -> b
- ifThenElse (Just a) f _ = f a ifThenElse Nothing _ e = e
+ ifThenElse (Just a) f _ = f a
+ ifThenElse Nothing _ e = e
example :: String
example = if Just 2
@@ -558,7 +565,7 @@ Note that because MkT3 doesn't contain all the fields being updated,
its RHS is simply an error, so it doesn't impose any type constraints.
Hence the use of 'relevant_cont'.
-Note [Implict type sharing]
+Note [Implicit type sharing]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
We also take into account any "implicit" non-update fields. For example
data T a b where { MkT { f::a } :: T a a; ... }
@@ -744,7 +751,7 @@ tcExpr (RecordUpd record_expr rbinds _ _ _) res_ty
-- Universally-quantified tyvars that
-- appear in any of the *implicit*
-- arguments to the constructor are fixed
- -- See Note [Implict type sharing]
+ -- See Note [Implicit type sharing]
fixed_tys = [ty | (fld,ty) <- zip flds arg_tys
, not (fld `elem` upd_fld_names)]
@@ -800,7 +807,7 @@ tcExpr (PArrSeq _ _) _
\begin{code}
tcExpr (HsSpliceE is_ty splice) res_ty
- = ASSERT( is_ty ) -- Untyped splices are expanced by the renamer
+ = ASSERT( is_ty ) -- Untyped splices are expanded by the renamer
tcSpliceExpr splice res_ty
tcExpr (HsBracket brack) res_ty = tcTypedBracket brack res_ty
@@ -959,7 +966,7 @@ tcInferFun fun
-- Zonk the function type carefully, to expose any polymorphism
-- E.g. (( \(x::forall a. a->a). blah ) e)
- -- We can see the rank-2 type of the lambda in time to genrealise e
+ -- We can see the rank-2 type of the lambda in time to generalise e
; fun_ty' <- zonkTcType fun_ty
; (wrap, rho) <- deeplyInstantiate AppOrigin fun_ty'
diff --git a/compiler/typecheck/TcForeign.lhs b/compiler/typecheck/TcForeign.lhs
index 26af2c5ebfe0..303391fcddd1 100644
--- a/compiler/typecheck/TcForeign.lhs
+++ b/compiler/typecheck/TcForeign.lhs
@@ -12,6 +12,8 @@ is restricted to what the outside world understands (read C), and this
module checks to see if a foreign declaration has got a legal type.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcForeign
( tcForeignImports
, tcForeignExports
@@ -58,7 +60,6 @@ import SrcLoc
import Bag
import FastString
import Hooks
-import BasicTypes (Origin(..))
import Control.Monad
\end{code}
@@ -93,6 +94,20 @@ parameters.
Similarly, we don't need to look in AppTy's, because nothing headed by
an AppTy will be marshalable.
+Note [FFI type roles]
+~~~~~~~~~~~~~~~~~~~~~
+The 'go' helper function within normaliseFfiType' always produces
+representational coercions. But, in the "children_only" case, we need to
+use these coercions in a TyConAppCo. Accordingly, the roles on the coercions
+must be twiddled to match the expectation of the enclosing TyCon. However,
+we cannot easily go from an R coercion to an N one, so we forbid N roles
+on FFI type constructors. Currently, only two such type constructors exist:
+IO and FunPtr. Thus, this is not an onerous burden.
+
+If we ever want to lift this restriction, we would need to make 'go' take
+the target role as a parameter. This wouldn't be hard, but it's a complication
+not yet necessary and so is not yet implemented.
+
\begin{code}
-- normaliseFfiType takes the type from an FFI declaration, and
-- evaluates any type synonyms, type functions, and newtypes. However,
@@ -115,7 +130,8 @@ normaliseFfiType' env ty0 = go initRecTc ty0
-- We don't want to look through the IO newtype, even if it is
-- in scope, so we have a special case for it:
| tc_key `elem` [ioTyConKey, funPtrTyConKey]
- -- Those *must* have R roles on their parameters!
+ -- These *must not* have nominal roles on their parameters!
+ -- See Note [FFI type roles]
= children_only
| isNewTyCon tc -- Expand newtypes
@@ -142,10 +158,14 @@ normaliseFfiType' env ty0 = go initRecTc ty0
= nothing -- see Note [Don't recur in normaliseFfiType']
where
tc_key = getUnique tc
- children_only
+ children_only
= do xs <- mapM (go rec_nts) tys
let (cos, tys', gres) = unzip3 xs
- return ( mkTyConAppCo Representational tc cos
+ -- the (repeat Representational) is because 'go' always
+ -- returns R coercions
+ cos' = zipWith3 downgradeRole (tyConRoles tc)
+ (repeat Representational) cos
+ return ( mkTyConAppCo Representational tc cos'
, mkTyConApp tc tys', unionManyBags gres)
nt_co = mkUnbranchedAxInstCo Representational (newTyConCo tc) tys
nt_rhs = newTyConInstRhs tc tys
@@ -230,7 +250,7 @@ tcFImport (L dloc fo@(ForeignImport (L nloc nm) hs_ty _ imp_decl))
-- things are LocalIds. However, it does not need zonking,
-- (so TcHsSyn.zonkForeignExports ignores it).
- ; imp_decl' <- tcCheckFIType sig_ty arg_tys res_ty imp_decl
+ ; imp_decl' <- tcCheckFIType arg_tys res_ty imp_decl
-- Can't use sig_ty here because sig_ty :: Type and
-- we need HsType Id hence the undefined
; let fi_decl = ForeignImport (L nloc id) undefined (mkSymCo norm_co) imp_decl'
@@ -241,18 +261,18 @@ tcFImport d = pprPanic "tcFImport" (ppr d)
------------ Checking types for foreign import ----------------------
\begin{code}
-tcCheckFIType :: Type -> [Type] -> Type -> ForeignImport -> TcM ForeignImport
+tcCheckFIType :: [Type] -> Type -> ForeignImport -> TcM ForeignImport
-tcCheckFIType sig_ty arg_tys res_ty (CImport cconv safety mh l@(CLabel _))
+tcCheckFIType arg_tys res_ty (CImport cconv safety mh l@(CLabel _))
-- Foreign import label
= do checkCg checkCOrAsmOrLlvmOrInterp
-- NB check res_ty not sig_ty!
-- In case sig_ty is (forall a. ForeignPtr a)
- check (null arg_tys && isFFILabelTy res_ty) (illegalForeignLabelErr sig_ty)
+ check (isFFILabelTy (mkFunTys arg_tys res_ty)) (illegalForeignTyErr empty)
cconv' <- checkCConv cconv
return (CImport cconv' safety mh l)
-tcCheckFIType sig_ty arg_tys res_ty (CImport cconv safety mh CWrapper) = do
+tcCheckFIType arg_tys res_ty (CImport cconv safety mh CWrapper) = do
-- Foreign wrapper (former f.e.d.)
-- The type must be of the form ft -> IO (FunPtr ft), where ft is a valid
-- foreign type. For legacy reasons ft -> IO (Ptr ft) is accepted, too.
@@ -265,32 +285,32 @@ tcCheckFIType sig_ty arg_tys res_ty (CImport cconv safety mh CWrapper) = do
checkForeignRes mustBeIO checkSafe (isFFIDynTy arg1_ty) res_ty
where
(arg1_tys, res1_ty) = tcSplitFunTys arg1_ty
- _ -> addErrTc (illegalForeignTyErr empty sig_ty)
+ _ -> addErrTc (illegalForeignTyErr empty (ptext (sLit "One argument expected")))
return (CImport cconv' safety mh CWrapper)
-tcCheckFIType sig_ty arg_tys res_ty idecl@(CImport cconv safety mh (CFunction target))
+tcCheckFIType arg_tys res_ty idecl@(CImport cconv safety mh (CFunction target))
| isDynamicTarget target = do -- Foreign import dynamic
checkCg checkCOrAsmOrLlvmOrInterp
cconv' <- checkCConv cconv
case arg_tys of -- The first arg must be Ptr or FunPtr
- [] -> do
- check False (illegalForeignTyErr empty sig_ty)
+ [] ->
+ addErrTc (illegalForeignTyErr empty (ptext (sLit "At least one argument expected")))
(arg1_ty:arg_tys) -> do
dflags <- getDynFlags
let curried_res_ty = foldr FunTy res_ty arg_tys
check (isFFIDynTy curried_res_ty arg1_ty)
- (illegalForeignTyErr argument arg1_ty)
+ (illegalForeignTyErr argument)
checkForeignArgs (isFFIArgumentTy dflags safety) arg_tys
checkForeignRes nonIOok checkSafe (isFFIImportResultTy dflags) res_ty
return $ CImport cconv' safety mh (CFunction target)
| cconv == PrimCallConv = do
dflags <- getDynFlags
- check (xopt Opt_GHCForeignImportPrim dflags)
- (text "Use GHCForeignImportPrim to allow `foreign import prim'.")
+ checkTc (xopt Opt_GHCForeignImportPrim dflags)
+ (text "Use GHCForeignImportPrim to allow `foreign import prim'.")
checkCg checkCOrAsmOrLlvmOrInterp
checkCTarget target
- check (playSafe safety)
- (text "The safe/unsafe annotation should not be used with `foreign import prim'.")
+ checkTc (playSafe safety)
+ (text "The safe/unsafe annotation should not be used with `foreign import prim'.")
checkForeignArgs (isFFIPrimArgumentTy dflags) arg_tys
-- prim import result is more liberal, allows (#,,#)
checkForeignRes nonIOok checkSafe (isFFIPrimResultTy dflags) res_ty
@@ -316,7 +336,7 @@ tcCheckFIType sig_ty arg_tys res_ty idecl@(CImport cconv safety mh (CFunction ta
checkCTarget :: CCallTarget -> TcM ()
checkCTarget (StaticTarget str _ _) = do
checkCg checkCOrAsmOrLlvmOrInterp
- check (isCLabelString str) (badCName str)
+ checkTc (isCLabelString str) (badCName str)
checkCTarget DynamicTarget = panic "checkCTarget DynamicTarget"
@@ -351,7 +371,7 @@ tcForeignExports' decls
where
combine (binds, fs, gres1) (L loc fe) = do
(b, f, gres2) <- setSrcSpan loc (tcFExport fe)
- return ((FromSource, b) `consBag` binds, L loc f : fs, gres1 `unionBags` gres2)
+ return (b `consBag` binds, L loc f : fs, gres1 `unionBags` gres2)
tcFExport :: ForeignDecl Name -> TcM (LHsBind Id, ForeignDecl Id, Bag GlobalRdrElt)
tcFExport fo@(ForeignExport (L loc nm) hs_ty _ spec)
@@ -384,7 +404,7 @@ tcFExport d = pprPanic "tcFExport" (ppr d)
tcCheckFEType :: Type -> ForeignExport -> TcM ForeignExport
tcCheckFEType sig_ty (CExport (CExportStatic str cconv)) = do
checkCg checkCOrAsmOrLlvm
- check (isCLabelString str) (badCName str)
+ checkTc (isCLabelString str) (badCName str)
cconv' <- checkCConv cconv
checkForeignArgs isFFIExternalTy arg_tys
checkForeignRes nonIOok noCheckSafe isFFIExportResultTy res_ty
@@ -406,9 +426,10 @@ tcCheckFEType sig_ty (CExport (CExportStatic str cconv)) = do
\begin{code}
------------ Checking argument types for foreign import ----------------------
-checkForeignArgs :: (Type -> Bool) -> [Type] -> TcM ()
+checkForeignArgs :: (Type -> Validity) -> [Type] -> TcM ()
checkForeignArgs pred tys = mapM_ go tys
- where go ty = check (pred ty) (illegalForeignTyErr argument ty)
+ where
+ go ty = check (pred ty) (illegalForeignTyErr argument)
------------ Checking result types for foreign calls ----------------------
-- | Check that the type has the form
@@ -419,32 +440,34 @@ checkForeignArgs pred tys = mapM_ go tys
-- We also check that the Safe Haskell condition of FFI imports having
-- results in the IO monad holds.
--
-checkForeignRes :: Bool -> Bool -> (Type -> Bool) -> Type -> TcM ()
+checkForeignRes :: Bool -> Bool -> (Type -> Validity) -> Type -> TcM ()
checkForeignRes non_io_result_ok check_safe pred_res_ty ty
- = case tcSplitIOType_maybe ty of
- -- Got an IO result type, that's always fine!
- Just (_, res_ty) | pred_res_ty res_ty -> return ()
-
- -- Case for non-IO result type with FFI Import
- _ -> do
- dflags <- getDynFlags
- case (pred_res_ty ty && non_io_result_ok) of
- -- handle normal typecheck fail, we want to handle this first and
- -- only report safe haskell errors if the normal type check is OK.
- False -> addErrTc $ illegalForeignTyErr result ty
+ | Just (_, res_ty) <- tcSplitIOType_maybe ty
+ = -- Got an IO result type, that's always fine!
+ check (pred_res_ty res_ty) (illegalForeignTyErr result)
- -- handle safe infer fail
- _ | check_safe && safeInferOn dflags
- -> recordUnsafeInfer
+ -- Case for non-IO result type with FFI Import
+ | not non_io_result_ok
+ = addErrTc $ illegalForeignTyErr result (ptext (sLit "IO result type expected"))
- -- handle safe language typecheck fail
- _ | check_safe && safeLanguageOn dflags
- -> addErrTc $ illegalForeignTyErr result ty $+$ safeHsErr
+ | otherwise
+ = do { dflags <- getDynFlags
+ ; case pred_res_ty ty of
+ -- Handle normal typecheck fail, we want to handle this first and
+ -- only report safe haskell errors if the normal type check is OK.
+ NotValid msg -> addErrTc $ illegalForeignTyErr result msg
+
+ -- handle safe infer fail
+ _ | check_safe && safeInferOn dflags
+ -> recordUnsafeInfer
- -- sucess! non-IO return is fine
- _ -> return ()
+ -- handle safe language typecheck fail
+ _ | check_safe && safeLanguageOn dflags
+ -> addErrTc (illegalForeignTyErr result safeHsErr)
- where
+ -- success! non-IO return is fine
+ _ -> return () }
+ where
safeHsErr = ptext $ sLit "Safe Haskell is on, all FFI imports must be in the IO monad"
nonIOok, mustBeIO :: Bool
@@ -459,22 +482,22 @@ noCheckSafe = False
Checking a supported backend is in use
\begin{code}
-checkCOrAsmOrLlvm :: HscTarget -> Maybe SDoc
-checkCOrAsmOrLlvm HscC = Nothing
-checkCOrAsmOrLlvm HscAsm = Nothing
-checkCOrAsmOrLlvm HscLlvm = Nothing
+checkCOrAsmOrLlvm :: HscTarget -> Validity
+checkCOrAsmOrLlvm HscC = IsValid
+checkCOrAsmOrLlvm HscAsm = IsValid
+checkCOrAsmOrLlvm HscLlvm = IsValid
checkCOrAsmOrLlvm _
- = Just (text "requires unregisterised, llvm (-fllvm) or native code generation (-fasm)")
+ = NotValid (text "requires unregisterised, llvm (-fllvm) or native code generation (-fasm)")
-checkCOrAsmOrLlvmOrInterp :: HscTarget -> Maybe SDoc
-checkCOrAsmOrLlvmOrInterp HscC = Nothing
-checkCOrAsmOrLlvmOrInterp HscAsm = Nothing
-checkCOrAsmOrLlvmOrInterp HscLlvm = Nothing
-checkCOrAsmOrLlvmOrInterp HscInterpreted = Nothing
+checkCOrAsmOrLlvmOrInterp :: HscTarget -> Validity
+checkCOrAsmOrLlvmOrInterp HscC = IsValid
+checkCOrAsmOrLlvmOrInterp HscAsm = IsValid
+checkCOrAsmOrLlvmOrInterp HscLlvm = IsValid
+checkCOrAsmOrLlvmOrInterp HscInterpreted = IsValid
checkCOrAsmOrLlvmOrInterp _
- = Just (text "requires interpreted, unregisterised, llvm or native code generation")
+ = NotValid (text "requires interpreted, unregisterised, llvm or native code generation")
-checkCg :: (HscTarget -> Maybe SDoc) -> TcM ()
+checkCg :: (HscTarget -> Validity) -> TcM ()
checkCg check = do
dflags <- getDynFlags
let target = hscTarget dflags
@@ -482,8 +505,8 @@ checkCg check = do
HscNothing -> return ()
_ ->
case check target of
- Nothing -> return ()
- Just err -> addErrTc (text "Illegal foreign declaration:" <+> err)
+ IsValid -> return ()
+ NotValid err -> addErrTc (text "Illegal foreign declaration:" <+> err)
\end{code}
Calling conventions
@@ -512,20 +535,16 @@ checkCConv JavaScriptCallConv = do dflags <- getDynFlags
Warnings
\begin{code}
-check :: Bool -> MsgDoc -> TcM ()
-check True _ = return ()
-check _ the_err = addErrTc the_err
-
-illegalForeignLabelErr :: Type -> SDoc
-illegalForeignLabelErr ty
- = vcat [ illegalForeignTyErr empty ty
- , ptext (sLit "A foreign-imported address (via &foo) must have type (Ptr a) or (FunPtr a)") ]
-
-illegalForeignTyErr :: SDoc -> Type -> SDoc
-illegalForeignTyErr arg_or_res ty
- = hang (hsep [ptext (sLit "Unacceptable"), arg_or_res,
- ptext (sLit "type in foreign declaration:")])
- 2 (hsep [ppr ty])
+check :: Validity -> (MsgDoc -> MsgDoc) -> TcM ()
+check IsValid _ = return ()
+check (NotValid doc) err_fn = addErrTc (err_fn doc)
+
+illegalForeignTyErr :: SDoc -> SDoc -> SDoc
+illegalForeignTyErr arg_or_res extra
+ = hang msg 2 extra
+ where
+ msg = hsep [ ptext (sLit "Unacceptable"), arg_or_res
+ , ptext (sLit "type in foreign declaration:")]
-- Used for 'arg_or_res' argument to illegalForeignTyErr
argument, result :: SDoc
diff --git a/compiler/typecheck/TcGenDeriv.lhs b/compiler/typecheck/TcGenDeriv.lhs
index 3852106d7218..2967630da1a0 100644
--- a/compiler/typecheck/TcGenDeriv.lhs
+++ b/compiler/typecheck/TcGenDeriv.lhs
@@ -11,25 +11,14 @@ This module is nominally ``subordinate'' to @TcDeriv@, which is the
This is where we do all the grimy bindings' generation.
\begin{code}
-{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
module TcGenDeriv (
BagDerivStuff, DerivStuff(..),
- gen_Bounded_binds,
- gen_Enum_binds,
- gen_Eq_binds,
- gen_Ix_binds,
- gen_Ord_binds,
- gen_Read_binds,
- gen_Show_binds,
- gen_Data_binds,
- gen_old_Typeable_binds, gen_Typeable_binds,
- gen_Functor_binds,
+ genDerivedBinds,
FFoldType(..), functorLikeTraverse,
deepSubtypesContaining, foldDataConArgs,
- gen_Foldable_binds,
- gen_Traversable_binds,
mkCoerceClassMethEqn,
gen_Newtype_binds,
genAuxBinds,
@@ -75,6 +64,7 @@ import Bag
import Fingerprint
import TcEnv (InstInfo)
+import ListSetOps( assocMaybe )
import Data.List ( partition, intersperse )
\end{code}
@@ -97,10 +87,43 @@ data DerivStuff -- Please add this auxiliary stuff
| DerivFamInst (FamInst) -- New type family instances
-- New top-level auxiliary bindings
- | DerivHsBind ((Origin, LHsBind RdrName), LSig RdrName) -- Also used for SYB
+ | DerivHsBind (LHsBind RdrName, LSig RdrName) -- Also used for SYB
| DerivInst (InstInfo RdrName) -- New, auxiliary instances
\end{code}
+%************************************************************************
+%* *
+ Top level function
+%* *
+%************************************************************************
+
+\begin{code}
+genDerivedBinds :: DynFlags -> FixityEnv -> Class -> SrcSpan -> TyCon
+ -> (LHsBinds RdrName, BagDerivStuff)
+genDerivedBinds dflags fix_env clas loc tycon
+ | className clas `elem` oldTypeableClassNames
+ = gen_old_Typeable_binds dflags loc tycon
+
+ | Just gen_fn <- assocMaybe gen_list (getUnique clas)
+ = gen_fn loc tycon
+
+ | otherwise
+ = pprPanic "genDerivStuff: bad derived class" (ppr clas)
+ where
+ gen_list :: [(Unique, SrcSpan -> TyCon -> (LHsBinds RdrName, BagDerivStuff))]
+ gen_list = [ (eqClassKey, gen_Eq_binds)
+ , (typeableClassKey, gen_Typeable_binds dflags)
+ , (ordClassKey, gen_Ord_binds)
+ , (enumClassKey, gen_Enum_binds)
+ , (boundedClassKey, gen_Bounded_binds)
+ , (ixClassKey, gen_Ix_binds)
+ , (showClassKey, gen_Show_binds fix_env)
+ , (readClassKey, gen_Read_binds fix_env)
+ , (dataClassKey, gen_Data_binds dflags)
+ , (functorClassKey, gen_Functor_binds)
+ , (foldableClassKey, gen_Foldable_binds)
+ , (traversableClassKey, gen_Traversable_binds) ]
+\end{code}
%************************************************************************
%* *
@@ -276,7 +299,7 @@ Several special cases:
Note [Do not rely on compare]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's a bad idea to define only 'compare', and build the other binary
-comparisions on top of it; see Trac #2130, #4019. Reason: we don't
+comparisons on top of it; see Trac #2130, #4019. Reason: we don't
want to laboriously make a three-way comparison, only to extract a
binary result, something like this:
(>) (I# x) (I# y) = case <# x y of
@@ -360,7 +383,7 @@ gen_Ord_binds loc tycon
(nullary_cons, non_nullary_cons) = partition isNullarySrcDataCon tycon_data_cons
- mkOrdOp :: OrdOp -> (Origin, LHsBind RdrName)
+ mkOrdOp :: OrdOp -> LHsBind RdrName
-- Returns a binding op a b = ... compares a and b according to op ....
mkOrdOp op = mk_easy_FunBind loc (ordMethRdr op) [a_Pat, b_Pat] (mkOrdOpRhs op)
@@ -1210,20 +1233,22 @@ we generate
We are passed the Typeable2 class as well as T
\begin{code}
-gen_old_Typeable_binds :: DynFlags -> SrcSpan -> TyCon -> LHsBinds RdrName
+gen_old_Typeable_binds :: DynFlags -> SrcSpan -> TyCon
+ -> (LHsBinds RdrName, BagDerivStuff)
gen_old_Typeable_binds dflags loc tycon
- = unitBag $
+ = ( unitBag $
mk_easy_FunBind loc
(old_mk_typeOf_RDR tycon) -- Name of appropriate type0f function
[nlWildPat]
(nlHsApps oldMkTyConApp_RDR [tycon_rep, nlList []])
+ , emptyBag )
where
tycon_name = tyConName tycon
modl = nameModule tycon_name
- pkg = modulePackageId modl
+ pkg = modulePackageKey modl
modl_fs = moduleNameFS (moduleName modl)
- pkg_fs = packageIdFS pkg
+ pkg_fs = packageKeyFS pkg
name_fs = occNameFS (nameOccName tycon_name)
tycon_rep = nlHsApps oldMkTyCon_RDR
@@ -1270,17 +1295,19 @@ we generate
We are passed the Typeable2 class as well as T
\begin{code}
-gen_Typeable_binds :: DynFlags -> SrcSpan -> TyCon -> LHsBinds RdrName
+gen_Typeable_binds :: DynFlags -> SrcSpan -> TyCon
+ -> (LHsBinds RdrName, BagDerivStuff)
gen_Typeable_binds dflags loc tycon
- = unitBag $ mk_easy_FunBind loc typeRep_RDR [nlWildPat]
- (nlHsApps mkTyConApp_RDR [tycon_rep, nlList []])
+ = ( unitBag $ mk_easy_FunBind loc typeRep_RDR [nlWildPat]
+ (nlHsApps mkTyConApp_RDR [tycon_rep, nlList []])
+ , emptyBag )
where
tycon_name = tyConName tycon
modl = nameModule tycon_name
- pkg = modulePackageId modl
+ pkg = modulePackageKey modl
modl_fs = moduleNameFS (moduleName modl)
- pkg_fs = packageIdFS pkg
+ pkg_fs = packageKeyFS pkg
name_fs = occNameFS (nameOccName tycon_name)
tycon_rep = nlHsApps mkTyCon_RDR
@@ -1352,7 +1379,7 @@ gen_Data_binds dflags loc tycon
n_cons = length data_cons
one_constr = n_cons == 1
- genDataTyCon :: ((Origin, LHsBind RdrName), LSig RdrName)
+ genDataTyCon :: (LHsBind RdrName, LSig RdrName)
genDataTyCon -- $dT
= (mkHsVarBind loc rdr_name rhs,
L loc (TypeSig [L loc rdr_name] sig_ty))
@@ -1364,7 +1391,7 @@ gen_Data_binds dflags loc tycon
`nlHsApp` nlHsLit (mkHsString (showSDocOneLine dflags (ppr tycon)))
`nlHsApp` nlList constrs
- genDataDataCon :: DataCon -> ((Origin, LHsBind RdrName), LSig RdrName)
+ genDataDataCon :: DataCon -> (LHsBind RdrName, LSig RdrName)
genDataDataCon dc -- $cT1 etc
= (mkHsVarBind loc rdr_name rhs,
L loc (TypeSig [L loc rdr_name] sig_ty))
@@ -1714,10 +1741,10 @@ foldDataConArgs :: FFoldType a -> DataCon -> [a]
foldDataConArgs ft con
= map (functorLikeTraverse tv ft) (dataConOrigArgTys con)
where
- tv = last (dataConUnivTyVars con)
- -- Argument to derive for, 'a in the above description
- -- The validity checks have ensured that con is
- -- a vanilla data constructor
+ Just tv = getTyVar_maybe (last (tyConAppArgs (dataConOrigResTy con)))
+ -- Argument to derive for, 'a in the above description
+ -- The validity and kind checks have ensured that
+ -- the Just will match and a::*
-- Make a HsLam using a fresh variable from a State monad
mkSimpleLam :: (LHsExpr id -> State [id] (LHsExpr id)) -> State [id] (LHsExpr id)
@@ -1943,7 +1970,7 @@ gen_Newtype_binds loc cls inst_tvs cls_tys rhs_ty
(map (mkCoerceClassMethEqn cls inst_tvs cls_tys rhs_ty) (classMethods cls))
where
coerce_RDR = getRdrName coerceId
- mk_bind :: Id -> Pair Type -> (Origin, LHsBind RdrName)
+ mk_bind :: Id -> Pair Type -> LHsBind RdrName
mk_bind id (Pair tau_ty user_ty)
= mkRdrFunBind (L loc meth_RDR) [mkSimpleMatch [] rhs_expr]
where
@@ -1978,7 +2005,7 @@ The `tags' here start at zero, hence the @fIRST_TAG@ (currently one)
fiddling around.
\begin{code}
-genAuxBindSpec :: SrcSpan -> AuxBindSpec -> ((Origin, LHsBind RdrName), LSig RdrName)
+genAuxBindSpec :: SrcSpan -> AuxBindSpec -> (LHsBind RdrName, LSig RdrName)
genAuxBindSpec loc (DerivCon2Tag tycon)
= (mk_FunBind loc rdr_name eqns,
L loc (TypeSig [L loc rdr_name] (L loc sig_ty)))
@@ -2024,7 +2051,7 @@ genAuxBindSpec loc (DerivMaxTag tycon)
data_cons -> toInteger ((length data_cons) - fIRST_TAG)
type SeparateBagsDerivStuff = -- AuxBinds and SYB bindings
- ( Bag ((Origin, LHsBind RdrName), LSig RdrName)
+ ( Bag (LHsBind RdrName, LSig RdrName)
-- Extra bindings (used by Generic only)
, Bag TyCon -- Extra top-level datatypes
, Bag (FamInst) -- Extra family instances
@@ -2079,14 +2106,14 @@ mkParentType tc
\begin{code}
mk_FunBind :: SrcSpan -> RdrName
-> [([LPat RdrName], LHsExpr RdrName)]
- -> (Origin, LHsBind RdrName)
+ -> LHsBind RdrName
mk_FunBind loc fun pats_and_exprs
= mkRdrFunBind (L loc fun) matches
where
matches = [mkMatch p e emptyLocalBinds | (p,e) <-pats_and_exprs]
-mkRdrFunBind :: Located RdrName -> [LMatch RdrName (LHsExpr RdrName)] -> (Origin, LHsBind RdrName)
-mkRdrFunBind fun@(L loc fun_rdr) matches = (Generated, L loc (mkFunBind fun matches'))
+mkRdrFunBind :: Located RdrName -> [LMatch RdrName (LHsExpr RdrName)] -> LHsBind RdrName
+mkRdrFunBind fun@(L loc fun_rdr) matches = L loc (mkFunBind fun matches')
where
-- Catch-all eqn looks like
-- fmap = error "Void fmap"
diff --git a/compiler/typecheck/TcGenGenerics.lhs b/compiler/typecheck/TcGenGenerics.lhs
index 564cd9ef9b2a..d4c39340535b 100644
--- a/compiler/typecheck/TcGenGenerics.lhs
+++ b/compiler/typecheck/TcGenGenerics.lhs
@@ -6,13 +6,7 @@ The deriving code for the Generic class
(equivalent to the code in TcGenDeriv, for other classes)
\begin{code}
-{-# LANGUAGE ScopedTypeVariables #-}
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
module TcGenGenerics (canDoGenerics, canDoGenerics1,
@@ -42,11 +36,12 @@ import TcEnv
import MkId
import TcRnMonad
import HscTypes
+import ErrUtils( Validity(..), andValid )
import BuildTyCl
import SrcLoc
import Bag
import VarSet (elemVarSet)
-import Outputable
+import Outputable
import FastString
import Util
@@ -64,7 +59,7 @@ import Control.Monad (mplus,forM)
For the generic representation we need to generate:
\begin{itemize}
\item A Generic instance
-\item A Rep type instance
+\item A Rep type instance
\item Many auxiliary datatypes and instances for them (for the meta-information)
\end{itemize}
@@ -90,7 +85,7 @@ genGenericMetaTyCons tc mod =
mkTyCon name = ASSERT( isExternalName name )
buildAlgTyCon name [] [] Nothing [] distinctAbstractTyConRhs
- NonRecursive
+ NonRecursive
False -- Not promotable
False -- Not GADT syntax
NoParentTyCon
@@ -121,60 +116,63 @@ metaTyConsToDerivStuff tc metaDts =
cClas <- tcLookupClass constructorClassName
c_dfun_names <- sequence [ new_dfun_name cClas tc | _ <- metaC metaDts ]
sClas <- tcLookupClass selectorClassName
- s_dfun_names <- sequence (map sequence [ [ new_dfun_name sClas tc
- | _ <- x ]
+ s_dfun_names <- sequence (map sequence [ [ new_dfun_name sClas tc
+ | _ <- x ]
| x <- metaS metaDts ])
fix_env <- getFixityEnv
let
- safeOverlap = safeLanguageOn dflags
(dBinds,cBinds,sBinds) = mkBindsMetaD fix_env tc
- mk_inst clas tc dfun_name
+ mk_inst clas tc dfun_name
= mkLocalInstance (mkDictFunId dfun_name [] [] clas tys)
- (NoOverlap safeOverlap)
+ OverlapFlag { overlapMode = NoOverlap
+ , isSafeOverlap = safeLanguageOn dflags }
[] clas tys
where
tys = [mkTyConTy tc]
-
+
-- Datatype
d_metaTycon = metaD metaDts
d_inst = mk_inst dClas d_metaTycon d_dfun_name
d_binds = InstBindings { ib_binds = dBinds
, ib_pragmas = []
+ , ib_extensions = []
, ib_standalone_deriving = False }
d_mkInst = DerivInst (InstInfo { iSpec = d_inst, iBinds = d_binds })
-
+
-- Constructor
c_metaTycons = metaC metaDts
c_insts = [ mk_inst cClas c ds
| (c, ds) <- myZip1 c_metaTycons c_dfun_names ]
c_binds = [ InstBindings { ib_binds = c
, ib_pragmas = []
+ , ib_extensions = []
, ib_standalone_deriving = False }
| c <- cBinds ]
c_mkInst = [ DerivInst (InstInfo { iSpec = is, iBinds = bs })
| (is,bs) <- myZip1 c_insts c_binds ]
-
+
-- Selector
s_metaTycons = metaS metaDts
s_insts = map (map (\(s,ds) -> mk_inst sClas s ds))
(myZip2 s_metaTycons s_dfun_names)
s_binds = [ [ InstBindings { ib_binds = s
, ib_pragmas = []
+ , ib_extensions = []
, ib_standalone_deriving = False }
| s <- ss ] | ss <- sBinds ]
s_mkInst = map (map (\(is,bs) -> DerivInst (InstInfo { iSpec = is
, iBinds = bs})))
(myZip2 s_insts s_binds)
-
+
myZip1 :: [a] -> [b] -> [(a,b)]
myZip1 l1 l2 = ASSERT(length l1 == length l2) zip l1 l2
-
+
myZip2 :: [[a]] -> [[b]] -> [[(a,b)]]
myZip2 l1 l2 =
ASSERT(and (zipWith (>=) (map length l1) (map length l2)))
[ zip x1 x2 | (x1,x2) <- zip l1 l2 ]
-
+
return $ mapBag DerivTyCon (metaTyCons2TyCons metaDts)
`unionBags` listToBag (d_mkInst : c_mkInst ++ concat s_mkInst)
\end{code}
@@ -186,14 +184,13 @@ metaTyConsToDerivStuff tc metaDts =
%************************************************************************
\begin{code}
-get_gen1_constrained_tys :: TyVar -> [Type] -> [Type]
+get_gen1_constrained_tys :: TyVar -> Type -> [Type]
-- called by TcDeriv.inferConstraints; generates a list of types, each of which
-- must be a Functor in order for the Generic1 instance to work.
-get_gen1_constrained_tys argVar =
- concatMap $ argTyFold argVar $ ArgTyAlg {
- ata_rec0 = const [],
- ata_par1 = [], ata_rec1 = const [],
- ata_comp = (:)}
+get_gen1_constrained_tys argVar
+ = argTyFold argVar $ ArgTyAlg { ata_rec0 = const []
+ , ata_par1 = [], ata_rec1 = const []
+ , ata_comp = (:) }
{-
@@ -239,7 +236,7 @@ following constraints are satisfied.
-}
-canDoGenerics :: TyCon -> [Type] -> Maybe SDoc
+canDoGenerics :: TyCon -> [Type] -> Validity
-- canDoGenerics rep_tc tc_args determines if Generic/Rep can be derived for a
-- type expression (rep_tc tc_arg0 tc_arg1 ... tc_argn).
--
@@ -251,17 +248,17 @@ canDoGenerics tc tc_args
= mergeErrors (
-- Check (c) from Note [Requirements for deriving Generic and Rep].
(if (not (null (tyConStupidTheta tc)))
- then (Just (tc_name <+> text "must not have a datatype context"))
- else Nothing) :
+ then (NotValid (tc_name <+> text "must not have a datatype context"))
+ else IsValid) :
-- Check (a) from Note [Requirements for deriving Generic and Rep].
--
-- Data family indices can be instantiated; the `tc_args` here are
-- the representation tycon args
(if (all isTyVarTy (filterOut isKind tc_args))
- then Nothing
- else Just (tc_name <+> text "must not be instantiated;" <+>
- text "try deriving `" <> tc_name <+> tc_tys <>
- text "' instead"))
+ then IsValid
+ else NotValid (tc_name <+> text "must not be instantiated;" <+>
+ text "try deriving `" <> tc_name <+> tc_tys <>
+ text "' instead"))
-- See comment below
: (map bad_con (tyConDataCons tc)))
where
@@ -279,28 +276,28 @@ canDoGenerics tc tc_args
-- it relies on instantiating *polymorphic* sum and product types
-- at the argument types of the constructors
bad_con dc = if (any bad_arg_type (dataConOrigArgTys dc))
- then (Just (ppr dc <+> text "must not have unlifted or polymorphic arguments"))
+ then (NotValid (ppr dc <+> text "must not have unlifted or polymorphic arguments"))
else (if (not (isVanillaDataCon dc))
- then (Just (ppr dc <+> text "must be a vanilla data constructor"))
- else Nothing)
+ then (NotValid (ppr dc <+> text "must be a vanilla data constructor"))
+ else IsValid)
- -- Nor can we do the job if it's an existential data constructor,
- -- Nor if the args are polymorphic types (I don't think)
+ -- Nor can we do the job if it's an existential data constructor,
+ -- Nor if the args are polymorphic types (I don't think)
bad_arg_type ty = isUnLiftedType ty || not (isTauTy ty)
-mergeErrors :: [Maybe SDoc] -> Maybe SDoc
-mergeErrors [] = Nothing
-mergeErrors ((Just s):t) = case mergeErrors t of
- Nothing -> Just s
- Just s' -> Just (s <> text ", and" $$ s')
-mergeErrors (Nothing :t) = mergeErrors t
+mergeErrors :: [Validity] -> Validity
+mergeErrors [] = IsValid
+mergeErrors (NotValid s:t) = case mergeErrors t of
+ IsValid -> NotValid s
+ NotValid s' -> NotValid (s <> text ", and" $$ s')
+mergeErrors (IsValid : t) = mergeErrors t
-- A datatype used only inside of canDoGenerics1. It's the result of analysing
-- a type term.
data Check_for_CanDoGenerics1 = CCDG1
{ _ccdg1_hasParam :: Bool -- does the parameter of interest occurs in
-- this type?
- , _ccdg1_errors :: Maybe SDoc -- errors generated by this type
+ , _ccdg1_errors :: Validity -- errors generated by this type
}
{-
@@ -335,13 +332,13 @@ explicitly, even though foldDataConArgs is also doing this internally.
-- are taken care of by the call to canDoGenerics.
--
-- It returns Nothing if deriving is possible. It returns (Just reason) if not.
-canDoGenerics1 :: TyCon -> [Type] -> Maybe SDoc
+canDoGenerics1 :: TyCon -> [Type] -> Validity
canDoGenerics1 rep_tc tc_args =
- canDoGenerics rep_tc tc_args `mplus` additionalChecks
+ canDoGenerics rep_tc tc_args `andValid` additionalChecks
where
additionalChecks
-- check (f) from Note [Requirements for deriving Generic and Rep]
- | null (tyConTyVars rep_tc) = Just $
+ | null (tyConTyVars rep_tc) = NotValid $
ptext (sLit "Data type") <+> quotes (ppr rep_tc)
<+> ptext (sLit "must have some type parameters")
@@ -349,19 +346,19 @@ canDoGenerics1 rep_tc tc_args =
data_cons = tyConDataCons rep_tc
check_con con = case check_vanilla con of
- j@(Just _) -> [j]
- Nothing -> _ccdg1_errors `map` foldDataConArgs (ft_check con) con
+ j@(NotValid {}) -> [j]
+ IsValid -> _ccdg1_errors `map` foldDataConArgs (ft_check con) con
bad :: DataCon -> SDoc -> SDoc
bad con msg = ptext (sLit "Constructor") <+> quotes (ppr con) <+> msg
- check_vanilla :: DataCon -> Maybe SDoc
- check_vanilla con | isVanillaDataCon con = Nothing
- | otherwise = Just (bad con existential)
+ check_vanilla :: DataCon -> Validity
+ check_vanilla con | isVanillaDataCon con = IsValid
+ | otherwise = NotValid (bad con existential)
- bmzero = CCDG1 False Nothing
- bmbad con s = CCDG1 True $ Just $ bad con s
- bmplus (CCDG1 b1 m1) (CCDG1 b2 m2) = CCDG1 (b1 || b2) (mplus m1 m2)
+ bmzero = CCDG1 False IsValid
+ bmbad con s = CCDG1 True $ NotValid $ bad con s
+ bmplus (CCDG1 b1 m1) (CCDG1 b2 m2) = CCDG1 (b1 || b2) (m1 `andValid` m2)
-- check (g) from Note [degenerate use of FFoldType]
ft_check :: DataCon -> FFoldType Check_for_CanDoGenerics1
@@ -389,7 +386,7 @@ canDoGenerics1 rep_tc tc_args =
, ft_forall = \_ body -> body -- polytypes are handled elsewhere
}
where
- caseVar = CCDG1 True Nothing
+ caseVar = CCDG1 True IsValid
existential = text "must not have existential arguments"
@@ -399,13 +396,13 @@ canDoGenerics1 rep_tc tc_args =
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{Generating the RHS of a generic default method}
-%* *
+%* *
%************************************************************************
\begin{code}
-type US = Int -- Local unique supply, just a plain Int
+type US = Int -- Local unique supply, just a plain Int
type Alt = (LPat RdrName, LHsExpr RdrName)
-- GenericKind serves to mark if a datatype derives Generic (Gen0) or
@@ -432,7 +429,7 @@ gk2gkDC Gen1_{} d = Gen1_DC $ last $ dataConUnivTyVars d
-- Bindings for the Generic instance
mkBindsRep :: GenericKind -> TyCon -> LHsBinds RdrName
-mkBindsRep gk tycon =
+mkBindsRep gk tycon =
unitBag (mkRdrFunBind (L loc from01_RDR) from_matches)
`unionBags`
unitBag (mkRdrFunBind (L loc to01_RDR) to_matches)
@@ -454,7 +451,7 @@ mkBindsRep gk tycon =
Gen1 -> ASSERT(length tyvars >= 1)
Gen1_ (last tyvars)
where tyvars = tyConTyVars tycon
-
+
--------------------------------------------------------------------------------
-- The type synonym instance and synonym
-- type instance Rep (D a b) = Rep_D a b
@@ -466,7 +463,7 @@ tc_mkRepFamInsts :: GenericKind -- Gen0 or Gen1
-> MetaTyCons -- Metadata datatypes to refer to
-> Module -- Used as the location of the new RepTy
-> TcM (FamInst) -- Generated representation0 coercion
-tc_mkRepFamInsts gk tycon metaDts mod =
+tc_mkRepFamInsts gk tycon metaDts mod =
-- Consider the example input tycon `D`, where data D a b = D_ a
-- Also consider `R:DInt`, where { data family D x y :: * -> *
-- ; data instance D Int a b = D_ a }
@@ -499,7 +496,7 @@ tc_mkRepFamInsts gk tycon metaDts mod =
-- `repTy` = D1 ... (C1 ... (S1 ... (Rec0 a))) :: * -> *
; repTy <- tc_mkRepTy gk_ tycon metaDts
-
+
-- `rep_name` is a name we generate for the synonym
; rep_name <- let mkGen = case gk of Gen0 -> mkGenR; Gen1 -> mkGen1R
in newGlobalBinder mod (mkGen (nameOccName (tyConName tycon)))
@@ -582,10 +579,10 @@ tc_mkRepTy :: -- Gen0_ or Gen1_, for Rep or Rep1
-- The type to generate representation for
-> TyCon
-- Metadata datatypes to refer to
- -> MetaTyCons
+ -> MetaTyCons
-- Generated representation0 type
-> TcM Type
-tc_mkRepTy gk_ tycon metaDts =
+tc_mkRepTy gk_ tycon metaDts =
do
d1 <- tcLookupTyCon d1TyConName
c1 <- tcLookupTyCon c1TyConName
@@ -599,7 +596,7 @@ tc_mkRepTy gk_ tycon metaDts =
plus <- tcLookupTyCon sumTyConName
times <- tcLookupTyCon prodTyConName
comp <- tcLookupTyCon compTyConName
-
+
let mkSum' a b = mkTyConApp plus [a,b]
mkProd a b = mkTyConApp times [a,b]
mkComp a b = mkTyConApp comp [a,b]
@@ -613,7 +610,7 @@ tc_mkRepTy gk_ tycon metaDts =
mkS True _ a = mkTyConApp s1 [mkTyConTy nS1, a]
-- This field has a label
mkS False d a = mkTyConApp s1 [d, a]
-
+
-- Sums and products are done in the same way for both Rep and Rep1
sumP [] = mkTyConTy v1
sumP l = ASSERT(length metaCTyCons == length l)
@@ -628,9 +625,9 @@ tc_mkRepTy gk_ tycon metaDts =
ASSERT(length l == length (metaSTyCons !! i))
foldBal mkProd [ arg d t b
| (d,t) <- zip (metaSTyCons !! i) l ]
-
+
arg :: Type -> Type -> Bool -> Type
- arg d t b = mkS b d $ case gk_ of
+ arg d t b = mkS b d $ case gk_ of
-- Here we previously used Par0 if t was a type variable, but we
-- realized that we can't always guarantee that we are wrapping-up
-- all type variables in Par0. So we decided to stop using Par0
@@ -643,40 +640,40 @@ tc_mkRepTy gk_ tycon metaDts =
argPar argVar = argTyFold argVar $ ArgTyAlg
{ata_rec0 = mkRec0, ata_par1 = mkPar1,
ata_rec1 = mkRec1, ata_comp = mkComp}
-
-
+
+
metaDTyCon = mkTyConTy (metaD metaDts)
metaCTyCons = map mkTyConTy (metaC metaDts)
metaSTyCons = map (map mkTyConTy) (metaS metaDts)
-
+
return (mkD tycon)
--------------------------------------------------------------------------------
-- Meta-information
--------------------------------------------------------------------------------
-data MetaTyCons = MetaTyCons { -- One meta datatype per dataype
+data MetaTyCons = MetaTyCons { -- One meta datatype per datatype
metaD :: TyCon
-- One meta datatype per constructor
, metaC :: [TyCon]
-- One meta datatype per selector per constructor
, metaS :: [[TyCon]] }
-
+
instance Outputable MetaTyCons where
ppr (MetaTyCons d c s) = ppr d $$ vcat (map ppr c) $$ vcat (map ppr (concat s))
-
+
metaTyCons2TyCons :: MetaTyCons -> Bag TyCon
metaTyCons2TyCons (MetaTyCons d c s) = listToBag (d : c ++ concat s)
-- Bindings for Datatype, Constructor, and Selector instances
-mkBindsMetaD :: FixityEnv -> TyCon
+mkBindsMetaD :: FixityEnv -> TyCon
-> ( LHsBinds RdrName -- Datatype instance
, [LHsBinds RdrName] -- Constructor instances
, [[LHsBinds RdrName]]) -- Selector instances
mkBindsMetaD fix_env tycon = (dtBinds, allConBinds, allSelBinds)
where
- mkBag l = foldr1 unionBags
+ mkBag l = foldr1 unionBags
[ unitBag (mkRdrFunBind (L loc name) matches)
| (name, matches) <- l ]
dtBinds = mkBag ( [ (datatypeName_RDR, dtName_matches)
@@ -714,7 +711,7 @@ mkBindsMetaD fix_env tycon = (dtBinds, allConBinds, allSelBinds)
dtName_matches = mkStringLHS . occNameString . nameOccName
$ tyConName_user
- moduleName_matches = mkStringLHS . moduleNameString . moduleName
+ moduleName_matches = mkStringLHS . moduleNameString . moduleName
. nameModule . tyConName $ tycon
isNewtype_matches = [mkSimpleHsAlt nlWildPat (nlHsVar true_RDR)]
@@ -775,10 +772,10 @@ mk1Sum gk_ us i n datacon = (from_alt, to_alt)
us' = us + n_args
datacon_rdr = getRdrName datacon
-
+
from_alt = (nlConVarPat datacon_rdr datacon_vars, from_alt_rhs)
from_alt_rhs = mkM1_E (genLR_E i n (mkProd_E gk_ us' datacon_varTys))
-
+
to_alt = (mkM1_P (genLR_P i n (mkProd_P gk us' datacon_vars)), to_alt_rhs)
-- These M1s are meta-information for the datatype
to_alt_rhs = case gk_ of
@@ -819,9 +816,9 @@ genLR_E i n e
-- Build a product expression
mkProd_E :: GenericKind_DC -- Generic or Generic1?
- -> US -- Base for unique names
+ -> US -- Base for unique names
-> [(RdrName, Type)] -- List of variables matched on the lhs and their types
- -> LHsExpr RdrName -- Resulting product expression
+ -> LHsExpr RdrName -- Resulting product expression
mkProd_E _ _ [] = mkM1_E (nlHsVar u1DataCon_RDR)
mkProd_E gk_ _ varTys = mkM1_E (foldBal prod appVars)
-- These M1s are meta-information for the constructor
@@ -845,9 +842,9 @@ wrapArg_E (Gen1_DC argVar) (var, ty) = mkM1_E $ converter ty `nlHsApp` nlHsVar v
-- Build a product pattern
mkProd_P :: GenericKind -- Gen0 or Gen1
- -> US -- Base for unique names
- -> [RdrName] -- List of variables to match
- -> LPat RdrName -- Resulting product pattern
+ -> US -- Base for unique names
+ -> [RdrName] -- List of variables to match
+ -> LPat RdrName -- Resulting product pattern
mkProd_P _ _ [] = mkM1_P (nlNullaryConPat u1DataCon_RDR)
mkProd_P gk _ vars = mkM1_P (foldBal prod appVars)
-- These M1s are meta-information for the constructor
diff --git a/compiler/typecheck/TcHsSyn.lhs b/compiler/typecheck/TcHsSyn.lhs
index 1c9ac57e8098..f4d5cf262cad 100644
--- a/compiler/typecheck/TcHsSyn.lhs
+++ b/compiler/typecheck/TcHsSyn.lhs
@@ -9,12 +9,15 @@ This module is an extension of @HsSyn@ syntax, for use in the type
checker.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcHsSyn (
mkHsConApp, mkHsDictLet, mkHsApp,
hsLitType, hsLPatType, hsPatType,
mkHsAppTy, mkSimpleHsAlt,
nlHsIntLit,
shortCutLit, hsOverLitName,
+ conLikeResTy,
-- re-exported from TcMonad
TcId, TcIdSet,
@@ -38,7 +41,9 @@ import TcEvidence
import TysPrim
import TysWiredIn
import Type
+import ConLike
import DataCon
+import PatSyn( patSynInstResTy )
import Name
import NameSet
import Var
@@ -80,14 +85,19 @@ hsPatType (ViewPat _ _ ty) = ty
hsPatType (ListPat _ ty Nothing) = mkListTy ty
hsPatType (ListPat _ _ (Just (ty,_))) = ty
hsPatType (PArrPat _ ty) = mkPArrTy ty
-hsPatType (TuplePat _ _ ty) = ty
-hsPatType (ConPatOut { pat_ty = ty }) = ty
+hsPatType (TuplePat _ bx tys) = mkTupleTy (boxityNormalTupleSort bx) tys
+hsPatType (ConPatOut { pat_con = L _ con, pat_arg_tys = tys })
+ = conLikeResTy con tys
hsPatType (SigPatOut _ ty) = ty
hsPatType (NPat lit _ _) = overLitType lit
hsPatType (NPlusKPat id _ _ _) = idType (unLoc id)
hsPatType (CoPat _ _ ty) = ty
hsPatType p = pprPanic "hsPatType" (ppr p)
+conLikeResTy :: ConLike -> [Type] -> Type
+conLikeResTy (RealDataCon con) tys = mkTyConApp (dataConTyCon con) tys
+conLikeResTy (PatSynCon ps) tys = patSynInstResTy ps tys
+
hsLitType :: HsLit -> TcType
hsLitType (HsChar _) = charTy
hsLitType (HsCharPrim _) = charPrimTy
@@ -405,10 +415,8 @@ warnMissingSig msg id
zonkMonoBinds :: ZonkEnv -> SigWarn -> LHsBinds TcId -> TcM (LHsBinds Id)
zonkMonoBinds env sig_warn binds = mapBagM (zonk_lbind env sig_warn) binds
-zonk_lbind :: ZonkEnv -> SigWarn -> (Origin, LHsBind TcId) -> TcM (Origin, LHsBind Id)
-zonk_lbind env sig_warn (origin, lbind)
- = do { lbind' <- wrapLocM (zonk_bind env sig_warn) lbind
- ; return (origin, lbind') }
+zonk_lbind :: ZonkEnv -> SigWarn -> LHsBind TcId -> TcM (LHsBind Id)
+zonk_lbind env sig_warn = wrapLocM (zonk_bind env sig_warn)
zonk_bind :: ZonkEnv -> SigWarn -> HsBind TcId -> TcM (HsBind Id)
zonk_bind env sig_warn bind@(PatBind { pat_lhs = pat, pat_rhs = grhss, pat_rhs_ty = ty})
@@ -460,18 +468,19 @@ zonk_bind env sig_warn (AbsBinds { abs_tvs = tyvars, abs_ev_vars = evs
, abe_mono = zonkIdOcc env mono_id
, abe_prags = new_prags })
-zonk_bind env _sig_warn bind@(PatSynBind { patsyn_id = L loc id
- , patsyn_args = details
- , patsyn_def = lpat
- , patsyn_dir = dir })
+zonk_bind env _sig_warn (PatSynBind bind@(PSB { psb_id = L loc id
+ , psb_args = details
+ , psb_def = lpat
+ , psb_dir = dir }))
= do { id' <- zonkIdBndr env id
; details' <- zonkPatSynDetails env details
;(env1, lpat') <- zonkPat env lpat
; (_env2, dir') <- zonkPatSynDir env1 dir
- ; return (bind { patsyn_id = L loc id'
- , patsyn_args = details'
- , patsyn_def = lpat'
- , patsyn_dir = dir' }) }
+ ; return $ PatSynBind $
+ bind { psb_id = L loc id'
+ , psb_args = details'
+ , psb_def = lpat'
+ , psb_dir = dir' } }
zonkPatSynDetails :: ZonkEnv
-> HsPatSynDetails (Located TcId)
@@ -481,6 +490,9 @@ zonkPatSynDetails env = traverse (wrapLocM $ zonkIdBndr env)
zonkPatSynDir :: ZonkEnv -> HsPatSynDir TcId -> TcM (ZonkEnv, HsPatSynDir Id)
zonkPatSynDir env Unidirectional = return (env, Unidirectional)
zonkPatSynDir env ImplicitBidirectional = return (env, ImplicitBidirectional)
+zonkPatSynDir env (ExplicitBidirectional mg) = do
+ mg' <- zonkMatchGroup env zonkLExpr mg
+ return (env, ExplicitBidirectional mg')
zonkSpecPrags :: ZonkEnv -> TcSpecPrags -> TcM TcSpecPrags
zonkSpecPrags _ IsDefaultMethod = return IsDefaultMethod
@@ -506,11 +518,11 @@ zonkLTcSpecPrags env ps
zonkMatchGroup :: ZonkEnv
-> (ZonkEnv -> Located (body TcId) -> TcM (Located (body Id)))
-> MatchGroup TcId (Located (body TcId)) -> TcM (MatchGroup Id (Located (body Id)))
-zonkMatchGroup env zBody (MG { mg_alts = ms, mg_arg_tys = arg_tys, mg_res_ty = res_ty })
+zonkMatchGroup env zBody (MG { mg_alts = ms, mg_arg_tys = arg_tys, mg_res_ty = res_ty, mg_origin = origin })
= do { ms' <- mapM (zonkMatch env zBody) ms
; arg_tys' <- zonkTcTypeToTypes env arg_tys
; res_ty' <- zonkTcTypeToType env res_ty
- ; return (MG { mg_alts = ms', mg_arg_tys = arg_tys', mg_res_ty = res_ty' }) }
+ ; return (MG { mg_alts = ms', mg_arg_tys = arg_tys', mg_res_ty = res_ty', mg_origin = origin }) }
zonkMatch :: ZonkEnv
-> (ZonkEnv -> Located (body TcId) -> TcM (Located (body Id)))
@@ -1027,16 +1039,16 @@ zonk_pat env (PArrPat pats ty)
; (env', pats') <- zonkPats env pats
; return (env', PArrPat pats' ty') }
-zonk_pat env (TuplePat pats boxed ty)
- = do { ty' <- zonkTcTypeToType env ty
+zonk_pat env (TuplePat pats boxed tys)
+ = do { tys' <- mapM (zonkTcTypeToType env) tys
; (env', pats') <- zonkPats env pats
- ; return (env', TuplePat pats' boxed ty') }
+ ; return (env', TuplePat pats' boxed tys') }
-zonk_pat env p@(ConPatOut { pat_ty = ty, pat_tvs = tyvars
+zonk_pat env p@(ConPatOut { pat_arg_tys = tys, pat_tvs = tyvars
, pat_dicts = evs, pat_binds = binds
, pat_args = args, pat_wrap = wrapper })
= ASSERT( all isImmutableTyVar tyvars )
- do { new_ty <- zonkTcTypeToType env ty
+ do { new_tys <- mapM (zonkTcTypeToType env) tys
; (env0, new_tyvars) <- zonkTyBndrsX env tyvars
-- Must zonk the existential variables, because their
-- /kind/ need potential zonking.
@@ -1045,7 +1057,7 @@ zonk_pat env p@(ConPatOut { pat_ty = ty, pat_tvs = tyvars
; (env2, new_binds) <- zonkTcEvBinds env1 binds
; (env3, new_wrapper) <- zonkCoFn env2 wrapper
; (env', new_args) <- zonkConStuff env3 args
- ; return (env', p { pat_ty = new_ty,
+ ; return (env', p { pat_arg_tys = new_tys,
pat_tvs = new_tyvars,
pat_dicts = new_evs,
pat_binds = new_binds,
diff --git a/compiler/typecheck/TcHsType.lhs b/compiler/typecheck/TcHsType.lhs
index eed906898b7d..39c0acf2a6b1 100644
--- a/compiler/typecheck/TcHsType.lhs
+++ b/compiler/typecheck/TcHsType.lhs
@@ -5,7 +5,8 @@
\section[TcMonoType]{Typechecking user-specified @MonoTypes@}
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -24,7 +25,6 @@ module TcHsType (
-- Kind-checking types
-- No kind generalisation, no checkValidType
- KindCheckingStrategy(..), kcStrategy, kcStrategyFamDecl,
kcHsTyVarBndrs, tcHsTyVarBndrs,
tcHsLiftedType, tcHsOpenType,
tcLHsType, tcCheckLHsType,
@@ -53,6 +53,7 @@ import TcType
import Type
import TypeRep( Type(..) ) -- For the mkNakedXXX stuff
import Kind
+import RdrName( lookupLocalRdrOcc )
import Var
import VarSet
import TyCon
@@ -72,8 +73,9 @@ import Outputable
import FastString
import Util
+import Data.Maybe( isNothing )
import Control.Monad ( unless, when, zipWithM )
-import PrelNames( ipClassName, funTyConKey )
+import PrelNames( ipClassName, funTyConKey, allNameStrings )
\end{code}
@@ -207,18 +209,22 @@ tc_inst_head hs_ty
= tc_hs_type hs_ty ekConstraint
-----------------
-tcHsDeriv :: HsType Name -> TcM ([TyVar], Class, [Type])
--- Like tcHsSigTypeNC, but for the ...deriving( ty ) clause
-tcHsDeriv hs_ty
- = do { kind <- newMetaKindVar
- ; ty <- tcCheckHsTypeAndGen hs_ty kind
- -- Funny newtype deriving form
- -- forall a. C [a]
- -- where C has arity 2. Hence any-kinded result
- ; ty <- zonkSigType ty
+tcHsDeriv :: HsType Name -> TcM ([TyVar], Class, [Type], Kind)
+-- Like tcHsSigTypeNC, but for the ...deriving( C t1 ty2 ) clause
+-- Returns the C, [ty1, ty2, and the kind of C's *next* argument
+-- E.g. class C (a::*) (b::k->k)
+-- data T a b = ... deriving( C Int )
+-- returns ([k], C, [k, Int], k->k)
+-- Also checks that (C ty1 ty2 arg) :: Constraint
+-- if arg has a suitable kind
+tcHsDeriv hs_ty
+ = do { arg_kind <- newMetaKindVar
+ ; ty <- tcCheckHsTypeAndGen hs_ty (mkArrowKind arg_kind constraintKind)
+ ; ty <- zonkSigType ty
+ ; arg_kind <- zonkSigType arg_kind
; let (tvs, pred) = splitForAllTys ty
; case getClassPredTys_maybe pred of
- Just (cls, tys) -> return (tvs, cls, tys)
+ Just (cls, tys) -> return (tvs, cls, tys, arg_kind)
Nothing -> failWithTc (ptext (sLit "Illegal deriving item") <+> quotes (ppr hs_ty)) }
-- Used for 'VECTORISE [SCALAR] instance' declarations
@@ -389,13 +395,17 @@ tc_hs_type hs_ty@(HsAppTy ty1 ty2) exp_kind
(fun_ty, arg_tys) = splitHsAppTys ty1 [ty2]
--------- Foralls
-tc_hs_type hs_ty@(HsForAllTy _ hs_tvs context ty) exp_kind
- = tcHsTyVarBndrs hs_tvs $ \ tvs' ->
+tc_hs_type hs_ty@(HsForAllTy _ hs_tvs context ty) exp_kind@(EK exp_k _)
+ | isConstraintKind exp_k
+ = failWithTc (hang (ptext (sLit "Illegal constraint:")) 2 (ppr hs_ty))
+
+ | otherwise
+ = tcHsTyVarBndrs hs_tvs $ \ tvs' ->
-- Do not kind-generalise here! See Note [Kind generalisation]
do { ctxt' <- tcHsContext context
; ty' <- if null (unLoc context) then -- Plain forall, no context
tc_lhs_type ty exp_kind -- Why exp_kind? See Note [Body kind of forall]
- else
+ else
-- If there is a context, then this forall is really a
-- _function_, so the kind of the result really is *
-- The body kind (result of the function can be * or #, hence ekOpen
@@ -614,7 +624,6 @@ tcTyVar :: Name -> TcM (TcType, TcKind)
tcTyVar name -- Could be a tyvar, a tycon, or a datacon
= do { traceTc "lk1" (ppr name)
; thing <- tcLookup name
- ; traceTc "lk2" (ppr name <+> ppr thing)
; case thing of
ATyVar _ tv
| isKindVar tv
@@ -724,17 +733,17 @@ mkNakedAppTys ty1 tys2 = foldl AppTy ty1 tys2
zonkSigType :: TcType -> TcM TcType
-- Zonk the result of type-checking a user-written type signature
--- It may have kind varaibles in it, but no meta type variables
+-- It may have kind variables in it, but no meta type variables
-- Because of knot-typing (see Note [Zonking inside the knot])
--- it may need to establish the Type invariants;
+-- it may need to establish the Type invariants;
-- hence the use of mkTyConApp and mkAppTy
zonkSigType ty
= go ty
where
go (TyConApp tc tys) = do tys' <- mapM go tys
return (mkTyConApp tc tys')
- -- Key point: establish Type invariants!
- -- See Note [Zonking inside the knot]
+ -- Key point: establish Type invariants!
+ -- See Note [Zonking inside the knot]
go (LitTy n) = return (LitTy n)
@@ -892,181 +901,7 @@ addTypeCtxt (L _ ty) thing
%* *
%************************************************************************
-Note [Kind-checking strategies]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are three main declarations that we have to kind check carefully in the
-presence of -XPolyKinds: classes, datatypes, and data/type families. They each
-have a different kind-checking strategy (labeled in the parentheses above each
-section). This should potentially be cleaned up in the future, but this is how
-it stands now (June 2013).
-
-Classes (ParametricKinds):
- - kind-polymorphic by default
- - each un-annotated type variable is given a fresh meta kind variable
- - every explicit kind variable becomes a SigTv during inference
- - no generalisation is done while kind-checking the recursive group
-
- Taken together, this means that classes cannot participate in polymorphic
- recursion. Thus, the following is not definable:
-
- class Fugly (a :: k) where
- foo :: forall (b :: k -> *). Fugly b => b a
-
- But, because explicit kind variables are SigTvs, it is OK for the kind to
- be forced to be the same kind that is used in a separate declaration. See
- test case polykinds/T7020.hs.
-
-Datatypes:
- Here we have two cases, whether or not a Full Kind Signature is provided.
- A Full Kind Signature means that there is a top-level :: in the definition
- of the datatype. For example:
-
- data T1 :: k -> Bool -> * where ... -- YES
- data T2 (a :: k) :: Bool -> * where ... -- YES
- data T3 (a :: k) (b :: Bool) :: * where ... -- YES
- data T4 (a :: k) (b :: Bool) where ... -- NO
-
- Kind signatures are not allowed on datatypes declared in the H98 style,
- so those always have no Full Kind Signature.
-
- Full Kind Signature (FullKindSignature):
- - each un-annotated type variable defaults to *
- - every explicit kind variable becomes a skolem during type inference
- - these kind variables are generalised *before* kind-checking the group
-
- With these rules, polymorphic recursion is possible. This is essentially
- because of the generalisation step before kind-checking the group -- it
- gives the kind-checker enough flexibility to supply the right kind arguments
- to support polymorphic recursion.
-
- no Full Kind Signature (ParametricKinds):
- - kind-polymorphic by default
- - each un-annotated type variable is given a fresh meta kind variable
- - every explicit kind variable becomes a SigTv during inference
- - no generalisation is done while kind-checking the recursive group
-
- Thus, no polymorphic recursion in this case. See also Trac #6093 & #6049.
-
-Type families:
- Here we have three cases: open top-level families, closed top-level families,
- and open associated types. (There are no closed associated types, for good
- reason.)
-
- Open top-level families (FullKindSignature):
- - All open top-level families are considered to have a Full Kind Signature
- - All arguments and the result default to *
- - All kind variables are skolems
- - All kind variables are generalised before kind-checking the group
-
- This behaviour supports kind-indexed type and data families, because we
- need to have generalised before kind-checking for this to work. For example:
-
- type family F (a :: k)
- type instance F Int = Bool
- type instance F Maybe = Char
- type instance F (x :: * -> * -> *) = Double
-
- Closed top-level families (NonParametricKinds):
- - kind-monomorphic by default
- - each un-annotated type variable is given a fresh meta kind variable
- - every explicit kind variable becomes a skolem during inference
- - all such skolems are generalised before kind-checking; other kind
- variables are not generalised
- - all unconstrained meta kind variables are defaulted to * at the
- end of kind checking
-
- This behaviour is to allow kind inference to occur in closed families, but
- without becoming too polymorphic. For example:
-
- type family F a where
- F Int = Bool
- F Bool = Char
-
- We would want F to have kind * -> * from this definition, although something
- like k1 -> k2 would be perfectly sound. The reason we want this restriction is
- that it is better to have (F Maybe) be a kind error than simply stuck.
-
- The kind inference gives us also
-
- type family Not b where
- Not False = True
- Not True = False
-
- With an open family, the above would need kind annotations in its header.
-
- The tricky case is
-
- type family G a (b :: k) where
- G Int Int = False
- G Bool Maybe = True
-
- We want this to work. But, we also want (G Maybe Maybe) to be a kind error
- (in the first argument). So, we need to generalise the skolem "k" but not
- the meta kind variable associated with "a".
-
- Associated families (FullKindSignature):
- - Kind-monomorphic by default
- - Result kind defaults to *
- - Each type variable is either in the class header or not:
- - Type variables in the class header are given the kind inherited from
- the class header (and checked against an annotation, if any)
- - Un-annotated type variables default to *
- - Each kind variable mentioned in the class header becomes a SigTv during
- kind inference.
- - Each kind variable not mentioned in the class header becomes a skolem during
- kind inference.
- - Only the skolem kind variables are generalised before kind checking.
-
- Here are some examples:
-
- class Foo1 a b where
- type Bar1 (a :: k) (b :: k)
-
- The kind of Foo1 will be k -> k -> Constraint. Kind annotations on associated
- type declarations propagate to the header because the type variables in Bar1's
- declaration inherit the (meta) kinds of the class header.
-
- class Foo2 a where
- type Bar2 a
-
- The kind of Bar2 will be k -> *.
-
- class Foo3 a where
- type Bar3 a (b :: k)
- meth :: Bar3 a Maybe -> ()
-
- The kind of Bar3 will be k1 -> k2 -> *. This only kind-checks because the kind
- of b is generalised before kind-checking.
-
- class Foo4 a where
- type Bar4 a b
-
- Here, the kind of Bar4 will be k -> * -> *, because b is not mentioned in the
- class header, so it defaults to *.
-
\begin{code}
-data KindCheckingStrategy -- See Note [Kind-checking strategies]
- = ParametricKinds
- | NonParametricKinds
- | FullKindSignature
- deriving (Eq)
-
--- determine the appropriate strategy for a decl
-kcStrategy :: TyClDecl Name -> KindCheckingStrategy
-kcStrategy d@(ForeignType {}) = pprPanic "kcStrategy" (ppr d)
-kcStrategy (FamDecl fam_decl)
- = kcStrategyFamDecl fam_decl
-kcStrategy (SynDecl {}) = ParametricKinds
-kcStrategy (DataDecl { tcdDataDefn = HsDataDefn { dd_kindSig = m_ksig }})
- | Just _ <- m_ksig = FullKindSignature
- | otherwise = ParametricKinds
-kcStrategy (ClassDecl {}) = ParametricKinds
-
--- if the ClosedTypeFamily has no equations, do the defaulting to *, etc.
-kcStrategyFamDecl :: FamilyDecl Name -> KindCheckingStrategy
-kcStrategyFamDecl (FamilyDecl { fdInfo = ClosedTypeFamily (_:_) }) = NonParametricKinds
-kcStrategyFamDecl _ = FullKindSignature
mkKindSigVar :: Name -> TcM KindVar
-- Use the specified name; don't clone it
@@ -1087,13 +922,17 @@ kcScopedKindVars kv_ns thing_inside
-- NB: use mutable signature variables
; tcExtendTyVarEnv2 (kv_ns `zip` kvs) thing_inside }
-kcHsTyVarBndrs :: KindCheckingStrategy
+-- | Kind-check a 'LHsTyVarBndrs'. If the decl under consideration has a complete,
+-- user-supplied kind signature (CUSK), generalise the result. Used in 'getInitialKind'
+-- and in kind-checking. See also Note [Complete user-supplied kind signatures] in
+-- HsDecls.
+kcHsTyVarBndrs :: Bool -- ^ True <=> the decl being checked has a CUSK
-> LHsTyVarBndrs Name
- -> TcM (Kind, r) -- the result kind, possibly with other info
- -> TcM (Kind, r)
--- Used in getInitialKind
-kcHsTyVarBndrs strat (HsQTvs { hsq_kvs = kv_ns, hsq_tvs = hs_tvs }) thing_inside
- = do { kvs <- if skolem_kvs
+ -> TcM (Kind, r) -- ^ the result kind, possibly with other info
+ -> TcM (Kind, r) -- ^ The full kind of the thing being declared,
+ -- with the other info
+kcHsTyVarBndrs cusk (HsQTvs { hsq_kvs = kv_ns, hsq_tvs = hs_tvs }) thing_inside
+ = do { kvs <- if cusk
then mapM mkKindSigVar kv_ns
else mapM (\n -> newSigTyVar n superKind) kv_ns
; tcExtendTyVarEnv2 (kv_ns `zip` kvs) $
@@ -1102,24 +941,18 @@ kcHsTyVarBndrs strat (HsQTvs { hsq_kvs = kv_ns, hsq_tvs = hs_tvs }) thing_inside
; let full_kind = mkArrowKinds (map snd nks) res_kind
kvs = filter (not . isMetaTyVar) $
varSetElems $ tyVarsOfType full_kind
- gen_kind = if generalise
+ gen_kind = if cusk
then mkForAllTys kvs full_kind
else full_kind
; return (gen_kind, stuff) } }
where
- -- See Note [Kind-checking strategies]
- (skolem_kvs, default_to_star, generalise) = case strat of
- ParametricKinds -> (False, False, False)
- NonParametricKinds -> (True, False, True)
- FullKindSignature -> (True, True, True)
-
kc_hs_tv :: HsTyVarBndr Name -> TcM (Name, TcKind)
kc_hs_tv (UserTyVar n)
= do { mb_thing <- tcLookupLcl_maybe n
; kind <- case mb_thing of
- Just (AThing k) -> return k
- _ | default_to_star -> return liftedTypeKind
- | otherwise -> newMetaKindVar
+ Just (AThing k) -> return k
+ _ | cusk -> return liftedTypeKind
+ | otherwise -> newMetaKindVar
; return (n, kind) }
kc_hs_tv (KindedTyVar n k)
= do { kind <- tcLHsKind k
@@ -1249,7 +1082,11 @@ kcTyClTyVars :: Name -> LHsTyVarBndrs Name -> TcM a -> TcM a
kcTyClTyVars name (HsQTvs { hsq_kvs = kvs, hsq_tvs = hs_tvs }) thing_inside
= kcScopedKindVars kvs $
do { tc_kind <- kcLookupKind name
- ; let (arg_ks, _res_k) = splitKindFunTysN (length hs_tvs) tc_kind
+ ; let (_, mono_kind) = splitForAllTys tc_kind
+ -- if we have a FullKindSignature, the tc_kind may already
+ -- be generalized. The kvs get matched up while kind-checking
+ -- the types in kc_tv, below
+ (arg_ks, _res_k) = splitKindFunTysN (length hs_tvs) mono_kind
-- There should be enough arrows, because
-- getInitialKinds used the tcdTyVars
; name_ks <- zipWithM kc_tv hs_tvs arg_ks
@@ -1297,6 +1134,11 @@ tcTyClTyVars tycon (HsQTvs { hsq_kvs = hs_kvs, hsq_tvs = hs_tvs }) thing_inside
; tvs <- zipWithM tc_hs_tv hs_tvs kinds
; tcExtendTyVarEnv tvs (thing_inside (kvs ++ tvs) res) }
where
+ -- In the case of associated types, the renamer has
+ -- ensured that the names are in commmon
+ -- e.g. class C a_29 where
+ -- type T b_30 a_29 :: *
+ -- Here the a_29 is shared
tc_hs_tv (L _ (UserTyVar n)) kind = return (mkTyVar n kind)
tc_hs_tv (L _ (KindedTyVar n hs_k)) kind = do { tc_kind <- tcLHsKind hs_k
; checkKind kind tc_kind
@@ -1313,21 +1155,20 @@ tcDataKindSig kind
= do { checkTc (isLiftedTypeKind res_kind) (badKindSig kind)
; span <- getSrcSpanM
; us <- newUniqueSupply
+ ; rdr_env <- getLocalRdrEnv
; let uniqs = uniqsFromSupply us
- ; return [ mk_tv span uniq str kind
- | ((kind, str), uniq) <- arg_kinds `zip` dnames `zip` uniqs ] }
+ occs = [ occ | str <- allNameStrings
+ , let occ = mkOccName tvName str
+ , isNothing (lookupLocalRdrOcc rdr_env occ) ]
+ -- Note [Avoid name clashes for associated data types]
+
+ ; return [ mk_tv span uniq occ kind
+ | ((kind, occ), uniq) <- arg_kinds `zip` occs `zip` uniqs ] }
where
(arg_kinds, res_kind) = splitKindFunTys kind
- mk_tv loc uniq str kind = mkTyVar name kind
- where
- name = mkInternalName uniq occ loc
- occ = mkOccName tvName str
+ mk_tv loc uniq occ kind
+ = mkTyVar (mkInternalName uniq occ loc) kind
- dnames = map ('$' :) names -- Note [Avoid name clashes for associated data types]
-
- names :: [String]
- names = [ c:cs | cs <- "" : names, c <- ['a'..'z'] ]
-
badKindSig :: Kind -> SDoc
badKindSig kind
= hang (ptext (sLit "Kind signature on data type declaration has non-* return kind"))
@@ -1338,19 +1179,17 @@ Note [Avoid name clashes for associated data types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider class C a b where
data D b :: * -> *
-When typechecking the decl for D, we'll invent an extra type variable for D,
-to fill out its kind. We *don't* want this type variable to be 'a', because
-in an .hi file we'd get
+When typechecking the decl for D, we'll invent an extra type variable
+for D, to fill out its kind. Ideally we don't want this type variable
+to be 'a', because when pretty printing we'll get
class C a b where
- data D b a
-which makes it look as if there are *two* type indices. But there aren't!
-So we use $a instead, which cannot clash with a user-written type variable.
-Remember that type variable binders in interface files are just FastStrings,
-not proper Names.
-
-(The tidying phase can't help here because we don't tidy TyCons. Another
-alternative would be to record the number of indexing parameters in the
-interface file.)
+ data D b a0
+(NB: the tidying happens in the conversion to IfaceSyn, which happens
+as part of pretty-printing a TyThing.)
+
+That's why we look in the LocalRdrEnv to see what's in scope. This is
+important only to get nice-looking output when doing ":info C" in GHCi.
+It isn't essential for correctness.
%************************************************************************
diff --git a/compiler/typecheck/TcInstDcls.lhs b/compiler/typecheck/TcInstDcls.lhs
index 21af9a6e82d3..2b123ffab6d6 100644
--- a/compiler/typecheck/TcInstDcls.lhs
+++ b/compiler/typecheck/TcInstDcls.lhs
@@ -6,7 +6,8 @@
TcInstDecls: Typechecking instance declarations
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -37,6 +38,7 @@ import TcDeriv
import TcEnv
import TcHsType
import TcUnify
+import Coercion ( pprCoAxiom )
import MkCore ( nO_METHOD_BINDING_ERROR_ID )
import Type
import TcEvidence
@@ -49,8 +51,8 @@ import VarEnv
import VarSet
import CoreUnfold ( mkDFunUnfolding )
import CoreSyn ( Expr(Var, Type), CoreExpr, mkTyApps, mkVarApps )
-import PrelNames ( tYPEABLE_INTERNAL, typeableClassName, oldTypeableClassNames )
-
+import PrelNames ( tYPEABLE_INTERNAL, typeableClassName,
+ oldTypeableClassNames, genericClassNames )
import Bag
import BasicTypes
import DynFlags
@@ -68,6 +70,7 @@ import BooleanFormula ( isUnsatisfied, pprBooleanFormulaNice )
import Control.Monad
import Maybes ( isNothing, isJust, whenIsJust )
+import Data.List ( mapAccumL )
\end{code}
Typechecking instance declarations is done in two passes. The first
@@ -412,13 +415,17 @@ tcInstDecls1 tycl_decls inst_decls deriv_decls
-- hand written instances of old Typeable as then unsafe casts could be
-- performed. Derived instances are OK.
; dflags <- getDynFlags
- ; when (safeLanguageOn dflags) $
- mapM_ (\x -> when (typInstCheck x)
- (addErrAt (getSrcSpan $ iSpec x) typInstErr))
- local_infos
+ ; when (safeLanguageOn dflags) $ forM_ local_infos $ \x -> case x of
+ _ | typInstCheck x -> addErrAt (getSrcSpan $ iSpec x) (typInstErr x)
+ _ | genInstCheck x -> addErrAt (getSrcSpan $ iSpec x) (genInstErr x)
+ _ -> return ()
+
-- As above but for Safe Inference mode.
- ; when (safeInferOn dflags) $
- mapM_ (\x -> when (typInstCheck x) recordUnsafeInfer) local_infos
+ ; when (safeInferOn dflags) $ forM_ local_infos $ \x -> case x of
+ _ | typInstCheck x -> recordUnsafeInfer
+ _ | genInstCheck x -> recordUnsafeInfer
+ _ | overlapCheck x -> recordUnsafeInfer
+ _ -> return ()
; return ( gbl_env
, bagToList deriv_inst_info ++ local_infos
@@ -439,8 +446,18 @@ tcInstDecls1 tycl_decls inst_decls deriv_decls
else (typeableInsts, i:otherInsts)
typInstCheck ty = is_cls_nm (iSpec ty) `elem` oldTypeableClassNames
- typInstErr = ptext $ sLit $ "Can't create hand written instances of Typeable in Safe"
- ++ " Haskell! Can only derive them"
+ typInstErr i = hang (ptext (sLit $ "Typeable instances can only be "
+ ++ "derived in Safe Haskell.") $+$
+ ptext (sLit "Replace the following instance:"))
+ 2 (pprInstanceHdr (iSpec i))
+
+ overlapCheck ty = overlapMode (is_flag $ iSpec ty) `elem`
+ [Overlappable, Overlapping, Overlaps]
+ genInstCheck ty = is_cls_nm (iSpec ty) `elem` genericClassNames
+ genInstErr i = hang (ptext (sLit $ "Generic instances can only be "
+ ++ "derived in Safe Haskell.") $+$
+ ptext (sLit "Replace the following instance:"))
+ 2 (pprInstanceHdr (iSpec i))
instMsg i = hang (ptext (sLit $ "Typeable instances can only be derived; replace "
++ "the following instance:"))
@@ -504,6 +521,7 @@ tcLocalInstDecl (L loc (ClsInstD { cid_inst = decl }))
tcClsInstDecl :: LClsInstDecl Name -> TcM ([InstInfo Name], [FamInst])
tcClsInstDecl (L loc (ClsInstDecl { cid_poly_ty = poly_ty, cid_binds = binds
, cid_sigs = uprags, cid_tyfam_insts = ats
+ , cid_overlap_mode = overlap_mode
, cid_datafam_insts = adts }))
= setSrcSpan loc $
addErrCtxt (instDeclCtxt1 poly_ty) $
@@ -525,44 +543,20 @@ tcClsInstDecl (L loc (ClsInstDecl { cid_poly_ty = poly_ty, cid_binds = binds
-- Check for missing associated types and build them
-- from their defaults (if available)
- ; let defined_ats = mkNameSet $ map (tyFamInstDeclName . unLoc) ats
- defined_adts = mkNameSet $ map (unLoc . dfid_tycon . unLoc) adts
-
- mk_deflt_at_instances :: ClassATItem -> TcM [FamInst]
- mk_deflt_at_instances (fam_tc, defs)
- -- User supplied instances ==> everything is OK
- | tyConName fam_tc `elemNameSet` defined_ats
- || tyConName fam_tc `elemNameSet` defined_adts
- = return []
-
- -- No defaults ==> generate a warning
- | null defs
- = do { warnMissingMethodOrAT "associated type" (tyConName fam_tc)
- ; return [] }
-
- -- No user instance, have defaults ==> instatiate them
- -- Example: class C a where { type F a b :: *; type F a b = () }
- -- instance C [x]
- -- Then we want to generate the decl: type F [x] b = ()
- | otherwise
- = forM defs $ \(CoAxBranch { cab_lhs = pat_tys, cab_rhs = rhs }) ->
- do { let pat_tys' = substTys mini_subst pat_tys
- rhs' = substTy mini_subst rhs
- tv_set' = tyVarsOfTypes pat_tys'
- tvs' = varSetElems tv_set'
- ; rep_tc_name <- newFamInstTyConName (noLoc (tyConName fam_tc)) pat_tys'
- ; let axiom = mkSingleCoAxiom rep_tc_name tvs' fam_tc pat_tys' rhs'
- ; ASSERT( tyVarsOfType rhs' `subVarSet` tv_set' )
- newFamInst SynFamilyInst axiom }
-
- ; tyfam_insts1 <- mapM mk_deflt_at_instances (classATItems clas)
+ ; let defined_ats = mkNameSet (map (tyFamInstDeclName . unLoc) ats)
+ `unionNameSets`
+ mkNameSet (map (unLoc . dfid_tycon . unLoc) adts)
+ ; tyfam_insts1 <- mapM (tcATDefault mini_subst defined_ats)
+ (classATItems clas)
-- Finally, construct the Core representation of the instance.
-- (This no longer includes the associated types.)
; dfun_name <- newDFunName clas inst_tys (getLoc poly_ty)
-- Dfun location is that of instance *header*
- ; overlap_flag <- getOverlapFlag
+ ; overlap_flag <-
+ do defaultOverlapFlag <- getOverlapFlag
+ return $ setOverlapModeMaybe defaultOverlapFlag overlap_mode
; (subst, tyvars') <- tcInstSkolTyVars tyvars
; let dfun = mkDictFunId dfun_name tyvars theta clas inst_tys
ispec = mkLocalInstance dfun overlap_flag tyvars' clas (substTys subst inst_tys)
@@ -572,10 +566,53 @@ tcClsInstDecl (L loc (ClsInstDecl { cid_poly_ty = poly_ty, cid_binds = binds
, iBinds = InstBindings
{ ib_binds = binds
, ib_pragmas = uprags
+ , ib_extensions = []
, ib_standalone_deriving = False } }
; return ( [inst_info], tyfam_insts0 ++ concat tyfam_insts1 ++ datafam_insts) }
+
+tcATDefault :: TvSubst -> NameSet -> ClassATItem -> TcM [FamInst]
+-- ^ Construct default instances for any associated types that
+-- aren't given a user definition
+-- Returns [] or singleton
+tcATDefault inst_subst defined_ats (ATI fam_tc defs)
+ -- User supplied instances ==> everything is OK
+ | tyConName fam_tc `elemNameSet` defined_ats
+ = return []
+
+ -- No user instance, have defaults ==> instatiate them
+ -- Example: class C a where { type F a b :: *; type F a b = () }
+ -- instance C [x]
+ -- Then we want to generate the decl: type F [x] b = ()
+ | Just rhs_ty <- defs
+ = do { let (subst', pat_tys') = mapAccumL subst_tv inst_subst
+ (tyConTyVars fam_tc)
+ rhs' = substTy subst' rhs_ty
+ tv_set' = tyVarsOfTypes pat_tys'
+ tvs' = varSetElemsKvsFirst tv_set'
+ ; rep_tc_name <- newFamInstTyConName (noLoc (tyConName fam_tc)) pat_tys'
+ ; let axiom = mkSingleCoAxiom rep_tc_name tvs' fam_tc pat_tys' rhs'
+ ; traceTc "mk_deflt_at_instance" (vcat [ ppr fam_tc, ppr rhs_ty
+ , pprCoAxiom axiom ])
+ ; fam_inst <- ASSERT( tyVarsOfType rhs' `subVarSet` tv_set' )
+ newFamInst SynFamilyInst axiom
+ ; return [fam_inst] }
+
+ -- No defaults ==> generate a warning
+ | otherwise -- defs = Nothing
+ = do { warnMissingMethodOrAT "associated type" (tyConName fam_tc)
+ ; return [] }
+ where
+ subst_tv subst tc_tv
+ | Just ty <- lookupVarEnv (getTvSubstEnv subst) tc_tv
+ = (subst, ty)
+ | otherwise
+ = (extendTvSubst subst tc_tv ty', ty')
+ where
+ ty' = mkTyVarTy (updateTyVarKind (substTy subst) tc_tv)
+
+
--------------
tcAssocTyDecl :: Class -- Class of associated type
-> VarEnv Type -- Instantiation of class TyVars
@@ -624,24 +661,22 @@ tcTyFamInstDecl :: Maybe (Class, VarEnv Type) -- the class & mini_env if applica
tcTyFamInstDecl mb_clsinfo (L loc decl@(TyFamInstDecl { tfid_eqn = eqn }))
= setSrcSpan loc $
tcAddTyFamInstCtxt decl $
- do { let fam_lname = tfie_tycon (unLoc eqn)
+ do { let fam_lname = tfe_tycon (unLoc eqn)
; fam_tc <- tcFamInstDeclCombined mb_clsinfo fam_lname
-- (0) Check it's an open type family
- ; checkTc (isFamilyTyCon fam_tc) (notFamily fam_tc)
- ; checkTc (isSynTyCon fam_tc) (wrongKindOfFamily fam_tc)
- ; checkTc (isOpenSynFamilyTyCon fam_tc)
- (notOpenFamily fam_tc)
+ ; checkTc (isFamilyTyCon fam_tc) (notFamily fam_tc)
+ ; checkTc (isSynFamilyTyCon fam_tc) (wrongKindOfFamily fam_tc)
+ ; checkTc (isOpenSynFamilyTyCon fam_tc) (notOpenFamily fam_tc)
-- (1) do the work of verifying the synonym group
- ; co_ax_branch <- tcSynFamInstDecl fam_tc decl
+ ; co_ax_branch <- tcTyFamInstEqn (famTyConShape fam_tc) eqn
-- (2) check for validity
; checkValidTyFamInst mb_clsinfo fam_tc co_ax_branch
-- (3) construct coercion axiom
- ; rep_tc_name <- newFamInstAxiomName loc
- (tyFamInstDeclName decl)
+ ; rep_tc_name <- newFamInstAxiomName loc (unLoc fam_lname)
[co_ax_branch]
; let axiom = mkUnbranchedCoAxiom rep_tc_name fam_tc co_ax_branch
; newFamInst SynFamilyInst axiom }
@@ -664,7 +699,7 @@ tcDataFamInstDecl mb_clsinfo
; checkTc (isAlgTyCon fam_tc) (wrongKindOfFamily fam_tc)
-- Kind check type patterns
- ; tcFamTyPats (unLoc fam_tc_name) (tyConKind fam_tc) pats
+ ; tcFamTyPats (famTyConShape fam_tc) pats
(kcDataDefn defn) $
\tvs' pats' res_kind -> do
@@ -679,7 +714,7 @@ tcDataFamInstDecl mb_clsinfo
; checkTc (isLiftedTypeKind res_kind) $ tooFewParmsErr (tyConArity fam_tc)
; stupid_theta <- tcHsContext ctxt
- ; h98_syntax <- dataDeclChecks (tyConName fam_tc) new_or_data stupid_theta cons
+ ; gadt_syntax <- dataDeclChecks (tyConName fam_tc) new_or_data stupid_theta cons
-- Construct representation tycon
; rep_tc_name <- newFamInstTyConName fam_tc_name pats'
@@ -702,7 +737,7 @@ tcDataFamInstDecl mb_clsinfo
rep_tc = buildAlgTyCon rep_tc_name tvs' roles cType stupid_theta tc_rhs
Recursive
False -- No promotable to the kind level
- h98_syntax parent
+ gadt_syntax parent
-- We always assume that indexed types are recursive. Why?
-- (1) Due to their open nature, we can never be sure that a
-- further instance might not introduce a new recursive
@@ -887,9 +922,9 @@ tcInstDecl2 (InstInfo { iSpec = ispec, iBinds = ibinds })
, abs_ev_vars = dfun_ev_vars
, abs_exports = [export]
, abs_ev_binds = sc_binds
- , abs_binds = unitBag (Generated, dict_bind) }
+ , abs_binds = unitBag dict_bind }
- ; return (unitBag (Generated, L loc main_bind) `unionBags`
+ ; return (unitBag (L loc main_bind) `unionBags`
listToBag meth_binds)
}
where
@@ -1168,22 +1203,26 @@ tcInstanceMethods :: DFunId -> Class -> [TcTyVar]
-> ([Located TcSpecPrag], PragFun)
-> [(Id, DefMeth)]
-> InstBindings Name
- -> TcM ([Id], [(Origin, LHsBind Id)])
+ -> TcM ([Id], [LHsBind Id])
-- The returned inst_meth_ids all have types starting
-- forall tvs. theta => ...
tcInstanceMethods dfun_id clas tyvars dfun_ev_vars inst_tys
(spec_inst_prags, prag_fn)
op_items (InstBindings { ib_binds = binds
, ib_pragmas = sigs
+ , ib_extensions = exts
, ib_standalone_deriving
= standalone_deriv })
= do { traceTc "tcInstMeth" (ppr sigs $$ ppr binds)
; let hs_sig_fn = mkHsSigFun sigs
; checkMinimalDefinition
- ; mapAndUnzipM (tc_item hs_sig_fn) op_items }
+ ; set_exts exts $ mapAndUnzipM (tc_item hs_sig_fn) op_items }
where
+ set_exts :: [ExtensionFlag] -> TcM a -> TcM a
+ set_exts es thing = foldr setXOptM thing es
+
----------------------
- tc_item :: HsSigFun -> (Id, DefMeth) -> TcM (Id, (Origin, LHsBind Id))
+ tc_item :: HsSigFun -> (Id, DefMeth) -> TcM (Id, LHsBind Id)
tc_item sig_fn (sel_id, dm_info)
= case findMethodBind (idName sel_id) binds of
Just (user_bind, bndr_loc)
@@ -1192,10 +1231,10 @@ tcInstanceMethods dfun_id clas tyvars dfun_ev_vars inst_tys
; tc_default sig_fn sel_id dm_info }
----------------------
- tc_body :: HsSigFun -> Id -> Bool -> (Origin, LHsBind Name)
- -> SrcSpan -> TcM (TcId, (Origin, LHsBind Id))
+ tc_body :: HsSigFun -> Id -> Bool -> LHsBind Name
+ -> SrcSpan -> TcM (TcId, LHsBind Id)
tc_body sig_fn sel_id generated_code rn_bind bndr_loc
- = add_meth_ctxt sel_id generated_code (snd rn_bind) $
+ = add_meth_ctxt sel_id generated_code rn_bind $
do { traceTc "tc_item" (ppr sel_id <+> ppr (idType sel_id))
; (meth_id, local_meth_sig) <- setSrcSpan bndr_loc $
mkMethIds sig_fn clas tyvars dfun_ev_vars
@@ -1211,12 +1250,12 @@ tcInstanceMethods dfun_id clas tyvars dfun_ev_vars inst_tys
; return (meth_id1, bind) }
----------------------
- tc_default :: HsSigFun -> Id -> DefMeth -> TcM (TcId, (Origin, LHsBind Id))
+ tc_default :: HsSigFun -> Id -> DefMeth -> TcM (TcId, LHsBind Id)
tc_default sig_fn sel_id (GenDefMeth dm_name)
= do { meth_bind <- mkGenericDefMethBind clas inst_tys sel_id dm_name
; tc_body sig_fn sel_id False {- Not generated code? -}
- (Generated, meth_bind) inst_loc }
+ meth_bind inst_loc }
tc_default sig_fn sel_id NoDefMeth -- No default method at all
= do { traceTc "tc_def: warn" (ppr sel_id)
@@ -1224,8 +1263,8 @@ tcInstanceMethods dfun_id clas tyvars dfun_ev_vars inst_tys
inst_tys sel_id
; dflags <- getDynFlags
; return (meth_id,
- (Generated, mkVarBind meth_id $
- mkLHsWrap lam_wrapper (error_rhs dflags))) }
+ mkVarBind meth_id $
+ mkLHsWrap lam_wrapper (error_rhs dflags)) }
where
error_rhs dflags = L inst_loc $ HsApp error_fun (error_msg dflags)
error_fun = L inst_loc $ wrapId (WpTyApp meth_tau) nO_METHOD_BINDING_ERROR_ID
@@ -1267,13 +1306,13 @@ tcInstanceMethods dfun_id clas tyvars dfun_ev_vars inst_tys
bind = AbsBinds { abs_tvs = tyvars, abs_ev_vars = dfun_ev_vars
, abs_exports = [export]
, abs_ev_binds = EvBinds (unitBag self_ev_bind)
- , abs_binds = unitBag (Generated, meth_bind) }
+ , abs_binds = unitBag meth_bind }
-- Default methods in an instance declaration can't have their own
-- INLINE or SPECIALISE pragmas. It'd be possible to allow them, but
-- currently they are rejected with
-- "INLINE pragma lacks an accompanying binding"
- ; return (meth_id1, (Generated, L inst_loc bind)) }
+ ; return (meth_id1, L inst_loc bind) }
----------------------
mk_meth_spec_prags :: Id -> [LTcSpecPrag] -> TcSpecPrags
@@ -1324,7 +1363,7 @@ mkGenericDefMethBind clas inst_tys sel_id dm_name
(vcat [ppr clas <+> ppr inst_tys,
nest 2 (ppr sel_id <+> equals <+> ppr rhs)]))
- ; return (noLoc $ mkTopFunBind (noLoc (idName sel_id))
+ ; return (noLoc $ mkTopFunBind Generated (noLoc (idName sel_id))
[mkSimpleMatch [] rhs]) }
where
rhs = nlHsVar dm_name
diff --git a/compiler/typecheck/TcInteract.lhs b/compiler/typecheck/TcInteract.lhs
index 377cd2dbd812..33249f4b0400 100644
--- a/compiler/typecheck/TcInteract.lhs
+++ b/compiler/typecheck/TcInteract.lhs
@@ -1,4 +1,6 @@
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcInteract (
solveInteractGiven, -- Solves [EvVar],GivenLoc
solveInteract, -- Solves Cts
@@ -101,6 +103,7 @@ solveInteractGiven loc old_fsks givens
, ctev_loc = loc }
| ev_id <- givens ]
+ -- See Note [Given flatten-skolems] in TcSMonad
fsk_bag = listToBag [ mkNonCanonical $ CtGiven { ctev_evtm = EvCoercion (mkTcNomReflCo tv_ty)
, ctev_pred = pred
, ctev_loc = loc }
@@ -750,12 +753,16 @@ kickOutRewritable :: CtEvidence -- Flavour of the equality that is
-> InertCans
-> TcS (Int, InertCans)
kickOutRewritable new_ev new_tv
- (IC { inert_eqs = tv_eqs
- , inert_dicts = dictmap
- , inert_funeqs = funeqmap
- , inert_irreds = irreds
- , inert_insols = insols
- , inert_no_eqs = no_eqs })
+ inert_cans@(IC { inert_eqs = tv_eqs
+ , inert_dicts = dictmap
+ , inert_funeqs = funeqmap
+ , inert_irreds = irreds
+ , inert_insols = insols
+ , inert_no_eqs = no_eqs })
+ | new_tv `elemVarEnv` tv_eqs -- Fast path: there is at least one equality for tv
+ -- so kick-out will do nothing
+ = return (0, inert_cans)
+ | otherwise
= do { traceTcS "kickOutRewritable" $
vcat [ text "tv = " <+> ppr new_tv
, ptext (sLit "Kicked out =") <+> ppr kicked_out]
@@ -1478,7 +1485,9 @@ doTopReactDict inerts fl cls xis
= do { instEnvs <- getInstEnvs
; let fd_eqns = improveFromInstEnv instEnvs pred
; fd_work <- rewriteWithFunDeps fd_eqns loc
- ; unless (null fd_work) (updWorkListTcS (extendWorkListEqs fd_work))
+ ; unless (null fd_work) $
+ do { traceTcS "Adding FD work" (ppr pred $$ vcat (map pprEquation fd_eqns) $$ ppr fd_work)
+ ; updWorkListTcS (extendWorkListEqs fd_work) }
; return NoTopInt }
--------------------
@@ -1830,7 +1839,7 @@ matchClassInst _ clas [ ty ] _
matchClassInst _ clas [ _k, ty1, ty2 ] loc
| clas == coercibleClass = do
- traceTcS "matchClassInst for" $ ppr clas <+> ppr ty1 <+> ppr ty2 <+> text "at depth" <+> ppr (ctLocDepth loc)
+ traceTcS "matchClassInst for" $ quotes (pprClassPred clas [ty1,ty2]) <+> text "at depth" <+> ppr (ctLocDepth loc)
ev <- getCoercibleInst loc ty1 ty2
traceTcS "matchClassInst returned" $ ppr ev
return ev
@@ -1922,11 +1931,12 @@ getCoercibleInst loc ty1 ty2 = do
-- Get some global stuff in scope, for nice pattern-guard based code in `go`
rdr_env <- getGlobalRdrEnvTcS
famenv <- getFamInstEnvs
- safeMode <- safeLanguageOn `fmap` getDynFlags
- go safeMode famenv rdr_env
+ go famenv rdr_env
where
- go :: Bool -> FamInstEnvs -> GlobalRdrEnv -> TcS LookupInstResult
- go safeMode famenv rdr_env
+ go :: FamInstEnvs -> GlobalRdrEnv -> TcS LookupInstResult
+ go famenv rdr_env
+ -- Also see [Order of Coercible Instances]
+
-- Coercible a a (see case 1 in [Coercible Instances])
| ty1 `tcEqType` ty2
= do return $ GenInst []
@@ -1942,15 +1952,36 @@ getCoercibleInst loc ty1 ty2 = do
ev_term <- deferTcSForAllEq Representational loc (tvs1,body1) (tvs2,body2)
return $ GenInst [] ev_term
- -- Coercible (D ty1 ty2) (D ty1' ty2') (see case 3 in [Coercible Instances])
+ -- Coercible NT a (see case 3 in [Coercible Instances])
+ | Just (tc,tyArgs) <- splitTyConApp_maybe ty1,
+ Just (concTy, ntCo) <- instNewTyConTF_maybe famenv tc tyArgs,
+ dataConsInScope rdr_env tc -- Do not look at all tyConsOfTyCon
+ = do markDataConsAsUsed rdr_env tc
+ ct_ev <- requestCoercible loc concTy ty2
+ local_var <- mkSysLocalM (fsLit "coev") $ mkCoerciblePred concTy ty2
+ let binds = EvBinds (unitBag (EvBind local_var (getEvTerm ct_ev)))
+ tcCo = TcLetCo binds $
+ coercionToTcCoercion ntCo `mkTcTransCo` mkTcCoVarCo local_var
+ return $ GenInst (freshGoals [ct_ev]) (EvCoercion tcCo)
+
+ -- Coercible a NT (see case 3 in [Coercible Instances])
+ | Just (tc,tyArgs) <- splitTyConApp_maybe ty2,
+ Just (concTy, ntCo) <- instNewTyConTF_maybe famenv tc tyArgs,
+ dataConsInScope rdr_env tc -- Do not look at all tyConsOfTyCon
+ = do markDataConsAsUsed rdr_env tc
+ ct_ev <- requestCoercible loc ty1 concTy
+ local_var <- mkSysLocalM (fsLit "coev") $ mkCoerciblePred ty1 concTy
+ let binds = EvBinds (unitBag (EvBind local_var (getEvTerm ct_ev)))
+ tcCo = TcLetCo binds $
+ mkTcCoVarCo local_var `mkTcTransCo` mkTcSymCo (coercionToTcCoercion ntCo)
+ return $ GenInst (freshGoals [ct_ev]) (EvCoercion tcCo)
+
+ -- Coercible (D ty1 ty2) (D ty1' ty2') (see case 4 in [Coercible Instances])
| Just (tc1,tyArgs1) <- splitTyConApp_maybe ty1,
Just (tc2,tyArgs2) <- splitTyConApp_maybe ty2,
tc1 == tc2,
- nominalArgsAgree tc1 tyArgs1 tyArgs2,
- not safeMode || all (dataConsInScope rdr_env) (tyConsOfTyCon tc1)
- = do -- Mark all used data constructors as used
- when safeMode $ mapM_ (markDataConsAsUsed rdr_env) (tyConsOfTyCon tc1)
- -- We want evidence for all type arguments of role R
+ nominalArgsAgree tc1 tyArgs1 tyArgs2
+ = do -- We want evidence for all type arguments of role R
arg_stuff <- forM (zip3 (tyConRoles tc1) tyArgs1 tyArgs2) $ \(r,ta1,ta2) ->
case r of Nominal -> do
return
@@ -1976,30 +2007,6 @@ getCoercibleInst loc ty1 ty2 = do
tcCo = TcLetCo binds (mkTcTyConAppCo Representational tc1 arg_cos)
return $ GenInst (catMaybes arg_new) (EvCoercion tcCo)
- -- Coercible NT a (see case 4 in [Coercible Instances])
- | Just (tc,tyArgs) <- splitTyConApp_maybe ty1,
- Just (concTy, ntCo) <- instNewTyConTF_maybe famenv tc tyArgs,
- dataConsInScope rdr_env tc -- Do not look at all tyConsOfTyCon
- = do markDataConsAsUsed rdr_env tc
- ct_ev <- requestCoercible loc concTy ty2
- local_var <- mkSysLocalM (fsLit "coev") $ mkCoerciblePred concTy ty2
- let binds = EvBinds (unitBag (EvBind local_var (getEvTerm ct_ev)))
- tcCo = TcLetCo binds $
- coercionToTcCoercion ntCo `mkTcTransCo` mkTcCoVarCo local_var
- return $ GenInst (freshGoals [ct_ev]) (EvCoercion tcCo)
-
- -- Coercible a NT (see case 4 in [Coercible Instances])
- | Just (tc,tyArgs) <- splitTyConApp_maybe ty2,
- Just (concTy, ntCo) <- instNewTyConTF_maybe famenv tc tyArgs,
- dataConsInScope rdr_env tc -- Do not look at all tyConsOfTyCon
- = do markDataConsAsUsed rdr_env tc
- ct_ev <- requestCoercible loc ty1 concTy
- local_var <- mkSysLocalM (fsLit "coev") $ mkCoerciblePred ty1 concTy
- let binds = EvBinds (unitBag (EvBind local_var (getEvTerm ct_ev)))
- tcCo = TcLetCo binds $
- mkTcCoVarCo local_var `mkTcTransCo` mkTcSymCo (coercionToTcCoercion ntCo)
- return $ GenInst (freshGoals [ct_ev]) (EvCoercion tcCo)
-
-- Cannot solve this one
| otherwise
= return NoInstance
@@ -2039,7 +2046,7 @@ Note [Coercible Instances]
The class Coercible is special: There are no regular instances, and the user
cannot even define them (it is listed as an `abstractClass` in TcValidity).
Instead, the type checker will create instances and their evidence out of thin
-air, in getCoercibleInst. The following “instances†are present:
+air, in getCoercibleInst. The following "instances" are present:
1. instance Coercible a a
for any type a at any kind k.
@@ -2048,33 +2055,14 @@ air, in getCoercibleInst. The following “instances†are present:
(which would be illegal to write like that in the source code, but we have
it nevertheless).
-
- 3. instance (Coercible t1_r t1'_r, Coercible t2_r t2_r',...) =>
- Coercible (C t1_r t2_r ... t1_p t2_p ... t1_n t2_n ...)
- (C t1_r' t2_r' ... t1_p' t2_p' ... t1_n t2_n ...)
- for a type constructor C where
- * the nominal type arguments are not changed,
- * the phantom type arguments may change arbitrarily
- * the representational type arguments are again Coercible
-
- The type constructor can be used undersaturated; then the Coercible
- instance is at a higher kind. This does not cause problems.
-
- Furthermore in Safe Haskell code, we check that
- * the data constructors of C are in scope and
- * the data constructors of all type constructors used in the definition of
- * C are in scope.
- This is required as otherwise the previous check can be circumvented by
- just adding a local data type around C.
-
- 4. instance Coercible r b => Coercible (NT t1 t2 ...) b
+ 3. instance Coercible r b => Coercible (NT t1 t2 ...) b
instance Coercible a r => Coercible a (NT t1 t2 ...)
for a newtype constructor NT (or data family instance that resolves to a
newtype) where
* r is the concrete type of NT, instantiated with the arguments t1 t2 ...
- * the constructor of NT are in scope.
+ * the constructor of NT is in scope.
- Again, the newtype TyCon can appear undersaturated, but only if it has
+ The newtype TyCon can appear undersaturated, but only if it has
enough arguments to apply the newtype coercion (which is eta-reduced). Examples:
newtype NT a = NT (Either a Int)
Coercible (NT Int) (Either Int Int) -- ok
@@ -2082,12 +2070,24 @@ air, in getCoercibleInst. The following “instances†are present:
newtype NT3 a b = NT3 (b -> a)
Coercible (NT2 Int) (NT3 Int) -- cannot be derived
+ 4. instance (Coercible t1_r t1'_r, Coercible t2_r t2_r',...) =>
+ Coercible (C t1_r t2_r ... t1_p t2_p ... t1_n t2_n ...)
+ (C t1_r' t2_r' ... t1_p' t2_p' ... t1_n t2_n ...)
+ for a type constructor C where
+ * the nominal type arguments are not changed,
+ * the phantom type arguments may change arbitrarily
+ * the representational type arguments are again Coercible
+
+ The type constructor can be used undersaturated; then the Coercible
+ instance is at a higher kind. This does not cause problems.
+
+
The type checker generates evidence in the form of EvCoercion, but the
TcCoercion therein has role Representational, which are turned into Core
coercions by dsEvTerm in DsBinds.
-The evidence for the first three instance is generated here by
-getCoercibleInst, for the second instance deferTcSForAllEq is used.
+The evidence for the second case is created by deferTcSForAllEq, for the other
+cases by getCoercibleInst.
When the constraint cannot be solved, it is treated as any other unsolved
constraint, i.e. it can turn up in an inferred type signature, or reported to
@@ -2096,6 +2096,33 @@ coercible_msg in TcErrors gives additional explanations of why GHC could not
find a Coercible instance, so it duplicates some of the logic from
getCoercibleInst (in negated form).
+Note [Order of Coercible Instances]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+At first glance, the order of the various coercible instances doesn't matter, as
+incoherence is no issue here: We do not care how the evidence is constructed,
+as long as it is.
+
+But because of role annotations, the order *can* matter:
+
+ newtype T a = MkT [a]
+ type role T nominal
+
+ type family F a
+ type instance F Int = Bool
+
+Here T's declared role is more restrictive than its inferred role
+(representational) would be. If MkT is not in scope, so that the
+newtype-unwrapping instance is not available, then this coercible
+instance would fail:
+ Coercible (T Bool) (T (F Int))
+But MkT was in scope, *and* if we used it before decomposing on T,
+we'd unwrap the newtype (on both sides) to get
+ Coercible Bool (F Int)
+which succeeds.
+
+So our current decision is to apply case 3 (newtype-unwrapping) first,
+followed by decomposition (case 4). This is strictly more powerful
+if the newtype constructor is in scope. See Trac #9117 for a discussion.
Note [Instance and Given overlap]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/compiler/typecheck/TcMType.lhs b/compiler/typecheck/TcMType.lhs
index a40a93df1605..65bc0b76533b 100644
--- a/compiler/typecheck/TcMType.lhs
+++ b/compiler/typecheck/TcMType.lhs
@@ -1,4 +1,4 @@
-%
+%
% (c) The University of Glasgow 2006
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
%
@@ -9,7 +9,8 @@ This module contains monadic operations over types that contain
mutable type variables
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -40,23 +41,23 @@ module TcMType (
--------------------------------
-- Instantiation
- tcInstTyVars, tcInstSigTyVars, newSigTyVar,
- tcInstType,
- tcInstSkolTyVars, tcInstSkolTyVarsLoc, tcInstSuperSkolTyVars,
- tcInstSkolTyVarsX, tcInstSuperSkolTyVarsX,
+ tcInstTyVars, newSigTyVar,
+ tcInstType,
+ tcInstSkolTyVars, tcInstSuperSkolTyVars,tcInstSuperSkolTyVarsX,
+ tcInstSigTyVarsLoc, tcInstSigTyVars,
tcInstSkolTyVar, tcInstSkolType,
tcSkolDFunType, tcSuperSkolTyVars,
--------------------------------
-- Zonking
zonkTcPredType,
- skolemiseSigTv, skolemiseUnboundMetaTyVar,
+ skolemiseUnboundMetaTyVar,
zonkTcTyVar, zonkTcTyVars, zonkTyVarsAndFV, zonkTcTypeAndFV,
zonkQuantifiedTyVar, quantifyTyVars,
zonkTcTyVarBndr, zonkTcType, zonkTcTypes, zonkTcThetaType,
zonkTcKind, defaultKindVarToStar,
- zonkEvVar, zonkWC, zonkId, zonkCt, zonkCts, zonkSkolemInfo,
+ zonkEvVar, zonkWC, zonkFlats, zonkId, zonkCt, zonkCts, zonkSkolemInfo,
tcGetGlobalTyVars,
) where
@@ -238,9 +239,6 @@ tcInstSkolTyVar loc overlappable subst tyvar
-- Wrappers
-- we need to be able to do this from outside the TcM monad:
-tcInstSkolTyVarsLoc :: SrcSpan -> [TyVar] -> TcRnIf gbl lcl (TvSubst, [TcTyVar])
-tcInstSkolTyVarsLoc loc = mapAccumLM (tcInstSkolTyVar loc False) (mkTopTvSubst [])
-
tcInstSkolTyVars :: [TyVar] -> TcM (TvSubst, [TcTyVar])
tcInstSkolTyVars = tcInstSkolTyVarsX (mkTopTvSubst [])
@@ -255,29 +253,26 @@ tcInstSuperSkolTyVarsX subst = tcInstSkolTyVars' True subst
tcInstSkolTyVars' :: Bool -> TvSubst -> [TyVar] -> TcM (TvSubst, [TcTyVar])
-- Precondition: tyvars should be ordered (kind vars first)
-- see Note [Kind substitution when instantiating]
+-- Get the location from the monad; this is a complete freshening operation
tcInstSkolTyVars' isSuperSkol subst tvs
= do { loc <- getSrcSpanM
; mapAccumLM (tcInstSkolTyVar loc isSuperSkol) subst tvs }
+tcInstSigTyVarsLoc :: SrcSpan -> [TyVar] -> TcRnIf gbl lcl (TvSubst, [TcTyVar])
+-- We specify the location
+tcInstSigTyVarsLoc loc = mapAccumLM (tcInstSkolTyVar loc False) (mkTopTvSubst [])
+
+tcInstSigTyVars :: [TyVar] -> TcRnIf gbl lcl (TvSubst, [TcTyVar])
+-- Get the location from the TyVar itself, not the monad
+tcInstSigTyVars = mapAccumLM inst_tv (mkTopTvSubst [])
+ where
+ inst_tv subst tv = tcInstSkolTyVar (getSrcSpan tv) False subst tv
+
tcInstSkolType :: TcType -> TcM ([TcTyVar], TcThetaType, TcType)
-- Instantiate a type with fresh skolem constants
-- Binding location comes from the monad
tcInstSkolType ty = tcInstType tcInstSkolTyVars ty
-tcInstSigTyVars :: [TyVar] -> TcM (TvSubst, [TcTyVar])
--- Make meta SigTv type variables for patten-bound scoped type varaibles
--- We use SigTvs for them, so that they can't unify with arbitrary types
--- Precondition: tyvars should be ordered (kind vars first)
--- see Note [Kind substitution when instantiating]
-tcInstSigTyVars = mapAccumLM tcInstSigTyVar (mkTopTvSubst [])
- -- The tyvars are freshly made, by tcInstSigTyVar
- -- So mkTopTvSubst [] is ok
-
-tcInstSigTyVar :: TvSubst -> TyVar -> TcM (TvSubst, TcTyVar)
-tcInstSigTyVar subst tv
- = do { new_tv <- newSigTyVar (tyVarName tv) (substTy subst (tyVarKind tv))
- ; return (extendTvSubst subst tv (mkTyVarTy new_tv), new_tv) }
-
newSigTyVar :: Name -> Kind -> TcM TcTyVar
newSigTyVar name kind
= do { uniq <- newUnique
@@ -391,34 +386,34 @@ writeMetaTyVar tyvar ty
--------------------
writeMetaTyVarRef :: TcTyVar -> TcRef MetaDetails -> TcType -> TcM ()
--- Here the tyvar is for error checking only;
+-- Here the tyvar is for error checking only;
-- the ref cell must be for the same tyvar
writeMetaTyVarRef tyvar ref ty
- | not debugIsOn
+ | not debugIsOn
= do { traceTc "writeMetaTyVar" (ppr tyvar <+> text ":=" <+> ppr ty)
; writeMutVar ref (Indirect ty) }
-- Everything from here on only happens if DEBUG is on
| otherwise
- = do { meta_details <- readMutVar ref;
+ = do { meta_details <- readMutVar ref;
-- Zonk kinds to allow the error check to work
- ; zonked_tv_kind <- zonkTcKind tv_kind
+ ; zonked_tv_kind <- zonkTcKind tv_kind
; zonked_ty_kind <- zonkTcKind ty_kind
-- Check for double updates
- ; ASSERT2( isFlexi meta_details,
+ ; ASSERT2( isFlexi meta_details,
hang (text "Double update of meta tyvar")
2 (ppr tyvar $$ ppr meta_details) )
traceTc "writeMetaTyVar" (ppr tyvar <+> text ":=" <+> ppr ty)
- ; writeMutVar ref (Indirect ty)
- ; when ( not (isPredTy tv_kind)
+ ; writeMutVar ref (Indirect ty)
+ ; when ( not (isPredTy tv_kind)
-- Don't check kinds for updates to coercion variables
&& not (zonked_ty_kind `tcIsSubKind` zonked_tv_kind))
$ WARN( True, hang (text "Ill-kinded update to meta tyvar")
- 2 ( ppr tyvar <+> text "::" <+> ppr tv_kind
- <+> text ":="
- <+> ppr ty <+> text "::" <+> ppr ty_kind) )
+ 2 ( ppr tyvar <+> text "::" <+> (ppr tv_kind $$ ppr zonked_tv_kind)
+ <+> text ":="
+ <+> ppr ty <+> text "::" <+> (ppr ty_kind $$ ppr zonked_ty_kind) ) )
(return ()) }
where
tv_kind = tyVarKind tyvar
@@ -598,17 +593,6 @@ skolemiseUnboundMetaTyVar tv details
; writeMetaTyVar tv (mkTyVarTy final_tv)
; return final_tv }
-
-skolemiseSigTv :: TcTyVar -> TcM TcTyVar
--- In TcBinds we create SigTvs for type signatures
--- but for singleton groups we want them to really be skolems
--- which do not unify with each other
-skolemiseSigTv tv
- = ASSERT2( isSigTyVar tv, ppr tv )
- do { writeMetaTyVarRef tv (metaTvRef tv) (mkTyVarTy skol_tv)
- ; return skol_tv }
- where
- skol_tv = setTcTyVarDetails tv (SkolemTv False)
\end{code}
Note [Zonking to Skolem]
diff --git a/compiler/typecheck/TcMatches.lhs b/compiler/typecheck/TcMatches.lhs
index 08ce7745d3c3..32b6d1e326e9 100644
--- a/compiler/typecheck/TcMatches.lhs
+++ b/compiler/typecheck/TcMatches.lhs
@@ -6,7 +6,8 @@
TcMatches: Typecheck some @Matches@
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, RankNTypes #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -109,7 +110,7 @@ tcMatchesCase :: (Outputable (body Name)) =>
tcMatchesCase ctxt scrut_ty matches res_ty
| isEmptyMatchGroup matches -- Allow empty case expressions
- = return (MG { mg_alts = [], mg_arg_tys = [scrut_ty], mg_res_ty = res_ty })
+ = return (MG { mg_alts = [], mg_arg_tys = [scrut_ty], mg_res_ty = res_ty, mg_origin = mg_origin matches })
| otherwise
= tcMatches ctxt [scrut_ty] res_ty matches
@@ -180,10 +181,10 @@ data TcMatchCtxt body -- c.f. TcStmtCtxt, also in this module
-> TcRhoType
-> TcM (Located (body TcId)) }
-tcMatches ctxt pat_tys rhs_ty (MG { mg_alts = matches })
+tcMatches ctxt pat_tys rhs_ty (MG { mg_alts = matches, mg_origin = origin })
= ASSERT( not (null matches) ) -- Ensure that rhs_ty is filled in
do { matches' <- mapM (tcMatch ctxt pat_tys rhs_ty) matches
- ; return (MG { mg_alts = matches', mg_arg_tys = pat_tys, mg_res_ty = rhs_ty }) }
+ ; return (MG { mg_alts = matches', mg_arg_tys = pat_tys, mg_res_ty = rhs_ty, mg_origin = origin }) }
-------------
tcMatch :: (Outputable (body Name)) => TcMatchCtxt body
diff --git a/compiler/typecheck/TcPat.lhs b/compiler/typecheck/TcPat.lhs
index ab6d7bd40c42..cfc76d653818 100644
--- a/compiler/typecheck/TcPat.lhs
+++ b/compiler/typecheck/TcPat.lhs
@@ -6,14 +6,16 @@
TcPat: Typechecking patterns
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, RankNTypes #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
-- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
-- for details
-module TcPat ( tcLetPat, TcSigFun, TcSigInfo(..), TcPragFun
+module TcPat ( tcLetPat, TcSigFun, TcPragFun
+ , TcSigInfo(..), findScopedTyVars
, LetBndrSpec(..), addInlinePrags, warnPrags
, tcPat, tcPats, newNoSigLetBndr
, addDataConStupidTheta, badFieldCon, polyPatSig ) where
@@ -29,6 +31,7 @@ import Inst
import Id
import Var
import Name
+import NameSet
import TcEnv
--import TcExpr
import TcMType
@@ -146,8 +149,7 @@ data TcSigInfo
sig_tvs :: [(Maybe Name, TcTyVar)],
-- Instantiated type and kind variables
-- Just n <=> this skolem is lexically in scope with name n
- -- See Note [Kind vars in sig_tvs]
- -- See Note [More instantiated than scoped] in TcBinds
+ -- See Note [Binding scoped type variables]
sig_theta :: TcThetaType, -- Instantiated theta
@@ -157,21 +159,56 @@ data TcSigInfo
sig_loc :: SrcSpan -- The location of the signature
}
+findScopedTyVars -- See Note [Binding scoped type variables]
+ :: LHsType Name -- The HsType
+ -> TcType -- The corresponding Type:
+ -- uses same Names as the HsType
+ -> [TcTyVar] -- The instantiated forall variables of the Type
+ -> [(Maybe Name, TcTyVar)] -- In 1-1 correspondence with the instantiated vars
+findScopedTyVars hs_ty sig_ty inst_tvs
+ = zipWith find sig_tvs inst_tvs
+ where
+ find sig_tv inst_tv
+ | tv_name `elemNameSet` scoped_names = (Just tv_name, inst_tv)
+ | otherwise = (Nothing, inst_tv)
+ where
+ tv_name = tyVarName sig_tv
+
+ scoped_names = mkNameSet (hsExplicitTvs hs_ty)
+ (sig_tvs,_) = tcSplitForAllTys sig_ty
+
instance Outputable TcSigInfo where
ppr (TcSigInfo { sig_id = id, sig_tvs = tyvars, sig_theta = theta, sig_tau = tau})
= ppr id <+> dcolon <+> vcat [ pprSigmaType (mkSigmaTy (map snd tyvars) theta tau)
, ppr (map fst tyvars) ]
\end{code}
-Note [Kind vars in sig_tvs]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-With kind polymorphism a signature like
- f :: forall f a. f a -> f a
-may actuallly give rise to
- f :: forall k. forall (f::k -> *) (a:k). f a -> f a
-So the sig_tvs will be [k,f,a], but only f,a are scoped.
-So the scoped ones are not necessarily the *inital* ones!
-
+Note [Binding scoped type variables]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The type variables *brought into lexical scope* by a type signature may
+be a subset of the *quantified type variables* of the signatures, for two reasons:
+
+* With kind polymorphism a signature like
+ f :: forall f a. f a -> f a
+ may actually give rise to
+ f :: forall k. forall (f::k -> *) (a:k). f a -> f a
+ So the sig_tvs will be [k,f,a], but only f,a are scoped.
+ NB: the scoped ones are not necessarily the *initial* ones!
+
+* Even aside from kind polymorphism, there may be more instantiated
+ type variables than lexically-scoped ones. For example:
+ type T a = forall b. b -> (a,b)
+ f :: forall c. T c
+ Here, the signature for f will have one scoped type variable, c,
+ but two instantiated type variables, c' and b'.
+
+The function findScopedTyVars takes
+ * hs_ty: the original HsForAllTy
+ * sig_ty: the corresponding Type (which is guaranteed to use the same Names
+ as the HsForAllTy)
+ * inst_tvs: the skolems instantiated from the forall's in sig_ty
+It returns a [(Maybe Name, TcTyVar)], in 1-1 correspondence with inst_tvs
+but with a (Just n) for the lexically scoped name of each in-scope tyvar.
Note [sig_tau may be polymorphic]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -495,9 +532,9 @@ tc_pat penv (TuplePat pats boxity _) pat_ty thing_inside
-- so that we can experiment with lazy tuple-matching.
-- This is a pretty odd place to make the switch, but
-- it was easy to do.
- ; let pat_ty' = mkTyConApp tc arg_tys
- -- pat_ty /= pat_ty iff coi /= IdCo
- unmangled_result = TuplePat pats' boxity pat_ty'
+ ; let
+ unmangled_result = TuplePat pats' boxity arg_tys
+ -- pat_ty /= pat_ty iff coi /= IdCo
possibly_mangled_result
| gopt Opt_IrrefutableTuples dflags &&
isBoxed boxity = LazyPat (noLoc unmangled_result)
@@ -694,13 +731,14 @@ tcDataConPat penv (L con_span con_name) data_con pat_ty arg_pats thing_inside
(zipTopTvSubst univ_tvs ctxt_res_tys) ex_tvs
-- Get location from monad, not from ex_tvs
- ; let pat_ty' = mkTyConApp tycon ctxt_res_tys
+ ; let -- pat_ty' = mkTyConApp tycon ctxt_res_tys
-- pat_ty' is type of the actual constructor application
-- pat_ty' /= pat_ty iff coi /= IdCo
-
+
arg_tys' = substTys tenv arg_tys
- ; traceTc "tcConPat" (ppr con_name $$ ppr ex_tvs' $$ ppr pat_ty' $$ ppr arg_tys')
+ ; traceTc "tcConPat" (vcat [ ppr con_name, ppr univ_tvs, ppr ex_tvs, ppr eq_spec
+ , ppr ex_tvs', ppr ctxt_res_tys, ppr arg_tys' ])
; if null ex_tvs && null eq_spec && null theta
then do { -- The common case; no class bindings etc
-- (see Note [Arrows and patterns])
@@ -710,7 +748,7 @@ tcDataConPat penv (L con_span con_name) data_con pat_ty arg_pats thing_inside
pat_tvs = [], pat_dicts = [],
pat_binds = emptyTcEvBinds,
pat_args = arg_pats',
- pat_ty = pat_ty',
+ pat_arg_tys = ctxt_res_tys,
pat_wrap = idHsWrapper }
; return (mkHsWrapPat wrap res_pat pat_ty, res) }
@@ -743,7 +781,7 @@ tcDataConPat penv (L con_span con_name) data_con pat_ty arg_pats thing_inside
pat_dicts = given,
pat_binds = ev_binds,
pat_args = arg_pats',
- pat_ty = pat_ty',
+ pat_arg_tys = ctxt_res_tys,
pat_wrap = idHsWrapper }
; return (mkHsWrapPat wrap res_pat pat_ty, res)
} }
@@ -753,11 +791,9 @@ tcPatSynPat :: PatEnv -> Located Name -> PatSyn
-> HsConPatDetails Name -> TcM a
-> TcM (Pat TcId, a)
tcPatSynPat penv (L con_span _) pat_syn pat_ty arg_pats thing_inside
- = do { let (univ_tvs, ex_tvs, (prov_theta, req_theta)) = patSynSig pat_syn
- arg_tys = patSynArgTys pat_syn
- ty = patSynType pat_syn
+ = do { let (univ_tvs, ex_tvs, prov_theta, req_theta, arg_tys, ty) = patSynSig pat_syn
- ; (_univ_tvs', inst_tys, subst) <- tcInstTyVars univ_tvs
+ ; (univ_tvs', inst_tys, subst) <- tcInstTyVars univ_tvs
; checkExistentials ex_tvs penv
; (tenv, ex_tvs') <- tcInstSuperSkolTyVarsX subst ex_tvs
@@ -777,14 +813,12 @@ tcPatSynPat penv (L con_span _) pat_syn pat_ty arg_pats thing_inside
; prov_dicts' <- newEvVars prov_theta'
- {-
+ -- Using a pattern synonym requires the PatternSynonyms
+ -- language flag to keep consistent with #2905
; patsyns_on <- xoptM Opt_PatternSynonyms
; checkTc patsyns_on
(ptext (sLit "A pattern match on a pattern synonym requires PatternSynonyms"))
- -- Trac #2905 decided that a *pattern-match* of a GADT
- -- should require the GADT language flag.
- -- Re TypeFamilies see also #7156
--}
+
; let skol_info = case pe_ctxt penv of
LamPat mc -> PatSkol (PatSynCon pat_syn) mc
LetPat {} -> UnkSkol -- Doesn't matter
@@ -803,7 +837,7 @@ tcPatSynPat penv (L con_span _) pat_syn pat_ty arg_pats thing_inside
pat_dicts = prov_dicts',
pat_binds = ev_binds,
pat_args = arg_pats',
- pat_ty = ty',
+ pat_arg_tys = mkTyVarTys univ_tvs',
pat_wrap = req_wrap }
; return (mkHsWrapPat wrap res_pat pat_ty, res) }
diff --git a/compiler/typecheck/TcPatSyn.lhs b/compiler/typecheck/TcPatSyn.lhs
index a126f0f85f19..b5fbc295f52f 100644
--- a/compiler/typecheck/TcPatSyn.lhs
+++ b/compiler/typecheck/TcPatSyn.lhs
@@ -5,7 +5,9 @@
\section[TcPatSyn]{Typechecking pattern synonym declarations}
\begin{code}
-module TcPatSyn (tcPatSynDecl) where
+{-# LANGUAGE CPP #-}
+
+module TcPatSyn (tcPatSynDecl, tcPatSynWrapper) where
import HsSyn
import TcPat
@@ -16,13 +18,13 @@ import TysPrim
import Name
import SrcLoc
import PatSyn
-import Maybes
import NameSet
import Panic
import Outputable
import FastString
import Var
import Id
+import IdInfo( IdDetails( VanillaId ) )
import TcBinds
import BasicTypes
import TcSimplify
@@ -32,43 +34,43 @@ import Data.Monoid
import Bag
import TcEvidence
import BuildTyCl
+import TypeRep
#include "HsVersions.h"
\end{code}
\begin{code}
-tcPatSynDecl :: Located Name
- -> HsPatSynDetails (Located Name)
- -> LPat Name
- -> HsPatSynDir Name
+tcPatSynDecl :: PatSynBind Name Name
-> TcM (PatSyn, LHsBinds Id)
-tcPatSynDecl lname@(L _ name) details lpat dir
+tcPatSynDecl PSB{ psb_id = lname@(L _ name), psb_args = details,
+ psb_def = lpat, psb_dir = dir }
= do { traceTc "tcPatSynDecl {" $ ppr name $$ ppr lpat
+ ; tcCheckPatSynPat lpat
; pat_ty <- newFlexiTyVarTy openTypeKind
; let (arg_names, is_infix) = case details of
- PrefixPatSyn names -> (map unLoc names, False)
+ PrefixPatSyn names -> (map unLoc names, False)
InfixPatSyn name1 name2 -> (map unLoc [name1, name2], True)
- ; ((lpat', args), wanted) <- captureConstraints $
- tcPat PatSyn lpat pat_ty $ mapM tcLookupId arg_names
+ ; ((lpat', args), wanted) <- captureConstraints $
+ tcPat PatSyn lpat pat_ty $
+ mapM tcLookupId arg_names
; let named_taus = (name, pat_ty):map (\arg -> (getName arg, varType arg)) args
; traceTc "tcPatSynDecl::wanted" (ppr named_taus $$ ppr wanted)
- ; (qtvs, given_dicts, _mr_bites, ev_binds) <- simplifyInfer True False named_taus wanted
- ; let req_dicts = given_dicts
+ ; (qtvs, req_dicts, _mr_bites, ev_binds) <- simplifyInfer True False named_taus wanted
; (ex_vars, prov_dicts) <- tcCollectEx lpat'
- ; let univ_tvs = filter (not . (`elemVarSet` ex_vars)) qtvs
- ex_tvs = varSetElems ex_vars
-
- ; pat_ty <- zonkTcType pat_ty
- ; args <- mapM zonkId args
+ ; let univ_tvs = filter (not . (`elemVarSet` ex_vars)) qtvs
+ ex_tvs = varSetElems ex_vars
+ prov_theta = map evVarPred prov_dicts
+ req_theta = map evVarPred req_dicts
- ; univ_tvs <- mapM zonkQuantifiedTyVar univ_tvs
- ; let prov_theta = map evVarPred prov_dicts
- req_theta = map evVarPred req_dicts
+ ; univ_tvs <- mapM zonkQuantifiedTyVar univ_tvs
+ ; ex_tvs <- mapM zonkQuantifiedTyVar ex_tvs
; prov_theta <- zonkTcThetaType prov_theta
- ; req_theta <- zonkTcThetaType req_theta
+ ; req_theta <- zonkTcThetaType req_theta
+ ; pat_ty <- zonkTcType pat_ty
+ ; args <- mapM zonkId args
; traceTc "tcPatSynDecl: ex" (ppr ex_tvs $$
ppr prov_theta $$
@@ -91,19 +93,24 @@ tcPatSynDecl lname@(L _ name) details lpat dir
prov_dicts req_dicts
prov_theta req_theta
pat_ty
- ; m_wrapper <- tcPatSynWrapper lname lpat dir args
- univ_tvs ex_tvs theta pat_ty
- ; let binds = matcher_bind `unionBags` maybe emptyBag snd m_wrapper
+
+ ; wrapper_id <- if isBidirectional dir
+ then fmap Just $ mkPatSynWrapperId lname args univ_tvs ex_tvs theta pat_ty
+ else return Nothing
; traceTc "tcPatSynDecl }" $ ppr name
; let patSyn = mkPatSyn name is_infix
- args
+ (map varType args)
univ_tvs ex_tvs
prov_theta req_theta
pat_ty
- matcher_id (fmap fst m_wrapper)
- ; return (patSyn, binds) }
+ matcher_id wrapper_id
+ ; return (patSyn, matcher_bind) }
+
+\end{code}
+
+\begin{code}
tcPatSynMatcher :: Located Name
-> LPat Id
-> [Var]
@@ -113,12 +120,18 @@ tcPatSynMatcher :: Located Name
-> ThetaType -> ThetaType
-> TcType
-> TcM (Id, LHsBinds Id)
+-- See Note [Matchers and wrappers for pattern synonyms] in PatSyn
tcPatSynMatcher (L loc name) lpat args univ_tvs ex_tvs ev_binds prov_dicts req_dicts prov_theta req_theta pat_ty
= do { res_tv <- zonkQuantifiedTyVar =<< newFlexiTyVar liftedTypeKind
- ; (matcher_id, res_ty, cont_ty) <- mkPatSynMatcherId name args
- univ_tvs ex_tvs
- prov_theta req_theta
- pat_ty res_tv
+ ; matcher_name <- newImplicitBinder name mkMatcherOcc
+ ; let res_ty = TyVarTy res_tv
+ cont_ty = mkSigmaTy ex_tvs prov_theta $
+ mkFunTys (map varType args) res_ty
+
+ ; let matcher_tau = mkFunTys [pat_ty, cont_ty, res_ty] res_ty
+ matcher_sigma = mkSigmaTy (res_tv:univ_tvs) req_theta matcher_tau
+ matcher_id = mkExportedLocalId VanillaId matcher_name matcher_sigma
+
; traceTc "tcPatSynMatcher" (ppr name $$ ppr (idType matcher_id))
; let matcher_lid = L loc matcher_id
@@ -141,18 +154,21 @@ tcPatSynMatcher (L loc name) lpat args univ_tvs ex_tvs ev_binds prov_dicts req_d
MG{ mg_alts = cases
, mg_arg_tys = [pat_ty]
, mg_res_ty = res_ty
+ , mg_origin = Generated
}
body' = noLoc $
HsLam $
MG{ mg_alts = [mkSimpleMatch args body]
, mg_arg_tys = [pat_ty, cont_ty, res_ty]
, mg_res_ty = res_ty
+ , mg_origin = Generated
}
match = mkMatch [] (mkHsLams (res_tv:univ_tvs) req_dicts body') EmptyLocalBinds
mg = MG{ mg_alts = [match]
, mg_arg_tys = []
, mg_res_ty = res_ty
+ , mg_origin = Generated
}
; let bind = FunBind{ fun_id = matcher_lid
@@ -161,7 +177,7 @@ tcPatSynMatcher (L loc name) lpat args univ_tvs ex_tvs ev_binds prov_dicts req_d
, fun_co_fn = idHsWrapper
, bind_fvs = emptyNameSet
, fun_tick = Nothing }
- matcher_bind = unitBag (Generated, noLoc bind)
+ matcher_bind = unitBag (noLoc bind)
; traceTc "tcPatSynMatcher" (ppr matcher_bind)
@@ -171,123 +187,195 @@ tcPatSynMatcher (L loc name) lpat args univ_tvs ex_tvs ev_binds prov_dicts req_d
name <- newName . mkVarOccFS . fsLit $ s
return $ mkLocalId name ty
-tcPatSynWrapper :: Located Name
- -> LPat Name
- -> HsPatSynDir Name
- -> [Var]
- -> [TyVar] -> [TyVar]
- -> ThetaType
- -> TcType
- -> TcM (Maybe (Id, LHsBinds Id))
-tcPatSynWrapper lname lpat dir args univ_tvs ex_tvs theta pat_ty
- = do { let argNames = mkNameSet (map Var.varName args)
- ; m_expr <- runMaybeT $ tcPatToExpr argNames lpat
- ; case (dir, m_expr) of
- (Unidirectional, _) ->
- return Nothing
- (ImplicitBidirectional, Nothing) ->
- cannotInvertPatSynErr (unLoc lpat)
- (ImplicitBidirectional, Just lexpr) ->
- fmap Just $ tc_pat_syn_wrapper_from_expr lname lexpr args univ_tvs ex_tvs theta pat_ty }
-
-tc_pat_syn_wrapper_from_expr :: Located Name
- -> LHsExpr Name
- -> [Var]
- -> [TyVar] -> [TyVar]
- -> ThetaType
- -> Type
- -> TcM (Id, LHsBinds Id)
-tc_pat_syn_wrapper_from_expr (L loc name) lexpr args univ_tvs ex_tvs theta pat_ty
+isBidirectional :: HsPatSynDir a -> Bool
+isBidirectional Unidirectional = False
+isBidirectional ImplicitBidirectional = True
+isBidirectional ExplicitBidirectional{} = True
+
+tcPatSynWrapper :: PatSynBind Name Name
+ -> TcM (LHsBinds Id)
+-- See Note [Matchers and wrappers for pattern synonyms] in PatSyn
+tcPatSynWrapper PSB{ psb_id = L loc name, psb_def = lpat, psb_dir = dir, psb_args = details }
+ = case dir of
+ Unidirectional -> return emptyBag
+ ImplicitBidirectional ->
+ do { wrapper_id <- tcLookupPatSynWrapper name
+ ; lexpr <- case tcPatToExpr (mkNameSet args) lpat of
+ Nothing -> cannotInvertPatSynErr lpat
+ Just lexpr -> return lexpr
+ ; let wrapper_args = map (noLoc . VarPat) args
+ wrapper_lname = L (getLoc lpat) (idName wrapper_id)
+ wrapper_match = mkMatch wrapper_args lexpr EmptyLocalBinds
+ wrapper_bind = mkTopFunBind Generated wrapper_lname [wrapper_match]
+ ; mkPatSynWrapper wrapper_id wrapper_bind }
+ ExplicitBidirectional mg ->
+ do { wrapper_id <- tcLookupPatSynWrapper name
+ ; mkPatSynWrapper wrapper_id $
+ FunBind{ fun_id = L loc (idName wrapper_id)
+ , fun_infix = False
+ , fun_matches = mg
+ , fun_co_fn = idHsWrapper
+ , bind_fvs = placeHolderNames
+ , fun_tick = Nothing }}
+ where
+ args = map unLoc $ case details of
+ PrefixPatSyn args -> args
+ InfixPatSyn arg1 arg2 -> [arg1, arg2]
+
+ tcLookupPatSynWrapper name
+ = do { patsyn <- tcLookupPatSyn name
+ ; case patSynWrapper patsyn of
+ Nothing -> panic "tcLookupPatSynWrapper"
+ Just wrapper_id -> return wrapper_id }
+
+mkPatSynWrapperId :: Located Name
+ -> [Var] -> [TyVar] -> [TyVar] -> ThetaType -> Type
+ -> TcM Id
+mkPatSynWrapperId (L _ name) args univ_tvs ex_tvs theta pat_ty
= do { let qtvs = univ_tvs ++ ex_tvs
- ; (subst, qtvs') <- tcInstSigTyVars qtvs
- ; let theta' = substTheta subst theta
+ ; (subst, wrapper_tvs) <- tcInstSkolTyVars qtvs
+ ; let wrapper_theta = substTheta subst theta
pat_ty' = substTy subst pat_ty
args' = map (\arg -> setVarType arg $ substTy subst (varType arg)) args
-
- ; wrapper_id <- mkPatSynWrapperId name args qtvs theta pat_ty
- ; let wrapper_name = getName wrapper_id
- wrapper_lname = L loc wrapper_name
- -- (wrapper_tvs, wrapper_theta, wrapper_tau) = tcSplitSigmaTy (idType wrapper_id)
- wrapper_tvs = qtvs'
- wrapper_theta = theta'
wrapper_tau = mkFunTys (map varType args') pat_ty'
+ wrapper_sigma = mkSigmaTy wrapper_tvs wrapper_theta wrapper_tau
- ; let wrapper_args = map (noLoc . VarPat . Var.varName) args'
- wrapper_match = mkMatch wrapper_args lexpr EmptyLocalBinds
- bind = mkTopFunBind wrapper_lname [wrapper_match]
- lbind = noLoc bind
- ; let sig = TcSigInfo{ sig_id = wrapper_id
- , sig_tvs = map (\tv -> (Nothing, tv)) wrapper_tvs
- , sig_theta = wrapper_theta
- , sig_tau = wrapper_tau
- , sig_loc = loc
- }
- ; (wrapper_binds, _, _) <- tcPolyCheck NonRecursive (const []) sig (Generated, lbind)
+ ; wrapper_name <- newImplicitBinder name mkDataConWrapperOcc
+ ; return $ mkExportedLocalId VanillaId wrapper_name wrapper_sigma }
+
+mkPatSynWrapper :: Id
+ -> HsBind Name
+ -> TcM (LHsBinds Id)
+mkPatSynWrapper wrapper_id bind
+ = do { (wrapper_binds, _, _) <- tcPolyCheck NonRecursive (const []) sig (noLoc bind)
; traceTc "tcPatSynDecl wrapper" $ ppr wrapper_binds
; traceTc "tcPatSynDecl wrapper type" $ ppr (varType wrapper_id)
- ; return (wrapper_id, wrapper_binds) }
+ ; return wrapper_binds }
+ where
+ sig = TcSigInfo{ sig_id = wrapper_id
+ , sig_tvs = map (\tv -> (Nothing, tv)) wrapper_tvs
+ , sig_theta = wrapper_theta
+ , sig_tau = wrapper_tau
+ , sig_loc = noSrcSpan
+ }
+ (wrapper_tvs, wrapper_theta, wrapper_tau) = tcSplitSigmaTy (idType wrapper_id)
+
+\end{code}
+
+Note [As-patterns in pattern synonym definitions]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The rationale for rejecting as-patterns in pattern synonym definitions
+is that an as-pattern would introduce nonindependent pattern synonym
+arguments, e.g. given a pattern synonym like:
+
+ pattern K x y = x@(Just y)
-tcNothing :: MaybeT TcM a
-tcNothing = MaybeT (return Nothing)
+one could write a nonsensical function like
-withLoc :: (a -> MaybeT TcM b) -> Located a -> MaybeT TcM (Located b)
-withLoc fn (L loc x) = MaybeT $ setSrcSpan loc $
- do { y <- runMaybeT $ fn x
- ; return (fmap (L loc) y) }
+ f (K Nothing x) = ...
-tcPatToExpr :: NameSet -> LPat Name -> MaybeT TcM (LHsExpr Name)
+or
+ g (K (Just True) False) = ...
+
+\begin{code}
+tcCheckPatSynPat :: LPat Name -> TcM ()
+tcCheckPatSynPat = go
+ where
+ go :: LPat Name -> TcM ()
+ go = addLocM go1
+
+ go1 :: Pat Name -> TcM ()
+ go1 (ConPatIn _ info) = mapM_ go (hsConPatArgs info)
+ go1 VarPat{} = return ()
+ go1 WildPat{} = return ()
+ go1 p@(AsPat _ _) = asPatInPatSynErr p
+ go1 (LazyPat pat) = go pat
+ go1 (ParPat pat) = go pat
+ go1 (BangPat pat) = go pat
+ go1 (PArrPat pats _) = mapM_ go pats
+ go1 (ListPat pats _ _) = mapM_ go pats
+ go1 (TuplePat pats _ _) = mapM_ go pats
+ go1 LitPat{} = return ()
+ go1 NPat{} = return ()
+ go1 (SigPatIn pat _) = go pat
+ go1 (ViewPat _ pat _) = go pat
+ go1 p@SplicePat{} = thInPatSynErr p
+ go1 p@QuasiQuotePat{} = thInPatSynErr p
+ go1 p@NPlusKPat{} = nPlusKPatInPatSynErr p
+ go1 ConPatOut{} = panic "ConPatOut in output of renamer"
+ go1 SigPatOut{} = panic "SigPatOut in output of renamer"
+ go1 CoPat{} = panic "CoPat in output of renamer"
+
+asPatInPatSynErr :: OutputableBndr name => Pat name -> TcM a
+asPatInPatSynErr pat
+ = failWithTc $
+ hang (ptext (sLit "Pattern synonym definition cannot contain as-patterns (@):"))
+ 2 (ppr pat)
+
+thInPatSynErr :: OutputableBndr name => Pat name -> TcM a
+thInPatSynErr pat
+ = failWithTc $
+ hang (ptext (sLit "Pattern synonym definition cannot contain Template Haskell:"))
+ 2 (ppr pat)
+
+nPlusKPatInPatSynErr :: OutputableBndr name => Pat name -> TcM a
+nPlusKPatInPatSynErr pat
+ = failWithTc $
+ hang (ptext (sLit "Pattern synonym definition cannot contain n+k-pattern:"))
+ 2 (ppr pat)
+
+tcPatToExpr :: NameSet -> LPat Name -> Maybe (LHsExpr Name)
tcPatToExpr lhsVars = go
where
- go :: LPat Name -> MaybeT TcM (LHsExpr Name)
+ go :: LPat Name -> Maybe (LHsExpr Name)
go (L loc (ConPatIn conName info))
- = MaybeT . setSrcSpan loc . runMaybeT $ do
+ = do
{ let con = L loc (HsVar (unLoc conName))
; exprs <- mapM go (hsConPatArgs info)
; return $ foldl (\x y -> L loc (HsApp x y)) con exprs }
- go p = withLoc go1 p
+ go (L loc p) = fmap (L loc) $ go1 p
- go1 :: Pat Name -> MaybeT TcM (HsExpr Name)
+ go1 :: Pat Name -> Maybe (HsExpr Name)
go1 (VarPat var)
- | var `elemNameSet` lhsVars = return (HsVar var)
- | otherwise = tcNothing
- go1 p@(AsPat _ _) = asPatInPatSynErr p
- go1 (LazyPat pat) = fmap HsPar (go pat)
- go1 (ParPat pat) = fmap HsPar (go pat)
- go1 (BangPat pat) = fmap HsPar (go pat)
+ | var `elemNameSet` lhsVars = return $ HsVar var
+ | otherwise = Nothing
+ go1 (LazyPat pat) = fmap HsPar $ go pat
+ go1 (ParPat pat) = fmap HsPar $ go pat
+ go1 (BangPat pat) = fmap HsPar $ go pat
go1 (PArrPat pats ptt)
= do { exprs <- mapM go pats
- ; return (ExplicitPArr ptt exprs) }
+ ; return $ ExplicitPArr ptt exprs }
go1 (ListPat pats ptt reb)
= do { exprs <- mapM go pats
- ; return (ExplicitList ptt (fmap snd reb) exprs) }
+ ; return $ ExplicitList ptt (fmap snd reb) exprs }
go1 (TuplePat pats box _)
= do { exprs <- mapM go pats
; return (ExplicitTuple (map Present exprs) box)
}
- go1 (LitPat lit) = return (HsLit lit)
- go1 (NPat n Nothing _) = return (HsOverLit n)
- go1 (NPat n (Just neg) _) = return (noLoc neg `HsApp` noLoc (HsOverLit n))
+ go1 (LitPat lit) = return $ HsLit lit
+ go1 (NPat n Nothing _) = return $ HsOverLit n
+ go1 (NPat n (Just neg) _) = return $ noLoc neg `HsApp` noLoc (HsOverLit n)
go1 (SigPatIn pat (HsWB ty _ _))
= do { expr <- go pat
- ; return (ExprWithTySig expr ty) }
+ ; return $ ExprWithTySig expr ty }
go1 (ConPatOut{}) = panic "ConPatOut in output of renamer"
go1 (SigPatOut{}) = panic "SigPatOut in output of renamer"
go1 (CoPat{}) = panic "CoPat in output of renamer"
- go1 _ = tcNothing
+ go1 _ = Nothing
-asPatInPatSynErr :: OutputableBndr name => Pat name -> MaybeT TcM a
-asPatInPatSynErr pat
- = MaybeT . failWithTc $
- hang (ptext (sLit "Pattern synonym definition cannot contain as-patterns (@):"))
- 2 (ppr pat)
-
--- TODO: Highlight sub-pattern that causes the problem
-cannotInvertPatSynErr :: OutputableBndr name => Pat name -> TcM a
-cannotInvertPatSynErr pat
- = failWithTc $
+cannotInvertPatSynErr :: OutputableBndr name => LPat name -> TcM a
+cannotInvertPatSynErr (L loc pat)
+ = setSrcSpan loc $ failWithTc $
hang (ptext (sLit "Right-hand side of bidirectional pattern synonym cannot be used as an expression"))
2 (ppr pat)
+-- Walk the whole pattern and for all ConPatOuts, collect the
+-- existentially-bound type variables and evidence binding variables.
+--
+-- These are used in computing the type of a pattern synonym and also
+-- in generating matcher functions, since success continuations need
+-- to be passed these pattern-bound evidences.
tcCollectEx :: LPat Id -> TcM (TyVarSet, [EvVar])
tcCollectEx = return . go
where
diff --git a/compiler/typecheck/TcPatSyn.lhs-boot b/compiler/typecheck/TcPatSyn.lhs-boot
index d0420c0c31cc..700137c16c12 100644
--- a/compiler/typecheck/TcPatSyn.lhs-boot
+++ b/compiler/typecheck/TcPatSyn.lhs-boot
@@ -3,14 +3,13 @@ module TcPatSyn where
import Name ( Name )
import Id ( Id )
-import HsSyn ( LPat, HsPatSynDetails, HsPatSynDir, LHsBinds )
+import HsSyn ( PatSynBind, LHsBinds )
import TcRnTypes ( TcM )
-import SrcLoc ( Located )
import PatSyn ( PatSyn )
-tcPatSynDecl :: Located Name
- -> HsPatSynDetails (Located Name)
- -> LPat Name
- -> HsPatSynDir Name
+tcPatSynDecl :: PatSynBind Name Name
-> TcM (PatSyn, LHsBinds Id)
+
+tcPatSynWrapper :: PatSynBind Name Name
+ -> TcM (LHsBinds Id)
\end{code}
diff --git a/compiler/typecheck/TcRnDriver.lhs b/compiler/typecheck/TcRnDriver.lhs
index dad2c67389e7..cd27e9d044fe 100644
--- a/compiler/typecheck/TcRnDriver.lhs
+++ b/compiler/typecheck/TcRnDriver.lhs
@@ -5,6 +5,8 @@
\section[TcMovectle]{Typechecking a whole module}
\begin{code}
+{-# LANGUAGE CPP, NondecreasingIndentation #-}
+
module TcRnDriver (
#ifdef GHCI
tcRnStmt, tcRnExpr, tcRnType,
@@ -13,12 +15,12 @@ module TcRnDriver (
getModuleInterface,
tcRnDeclsi,
isGHCiMonad,
+ runTcInteractive, -- Used by GHC API clients (Trac #8878)
#endif
tcRnLookupName,
tcRnGetInfo,
tcRnModule, tcRnModuleTcRnM,
- tcTopSrcDecls,
- tcRnExtCore
+ tcTopSrcDecls
) where
#ifdef GHCI
@@ -57,10 +59,9 @@ import LoadIface
import RnNames
import RnEnv
import RnSource
-import PprCore
-import CoreSyn
import ErrUtils
import Id
+import IdInfo( IdDetails( VanillaId ) )
import VarEnv
import Module
import UniqFM
@@ -78,14 +79,11 @@ import DataCon
import Type
import Class
import CoAxiom
-import Inst ( tcGetInstEnvs, tcGetInsts )
+import Inst ( tcGetInstEnvs )
import Annotations
import Data.List ( sortBy )
-import Data.IORef ( readIORef )
import Data.Ord
-#ifndef GHCI
-import BasicTypes ( Origin(..) )
-#else
+#ifdef GHCI
import BasicTypes hiding( SuccessFlag(..) )
import TcType ( isUnitTy, isTauTy )
import TcHsType
@@ -305,107 +303,6 @@ tcRnImports hsc_env import_decls
\end{code}
-%************************************************************************
-%* *
- Type-checking external-core modules
-%* *
-%************************************************************************
-
-\begin{code}
-tcRnExtCore :: HscEnv
- -> HsExtCore RdrName
- -> IO (Messages, Maybe ModGuts)
- -- Nothing => some error occurred
-
-tcRnExtCore hsc_env (HsExtCore this_mod decls src_binds)
- -- The decls are IfaceDecls; all names are original names
- = do { showPass (hsc_dflags hsc_env) "Renamer/typechecker" ;
-
- initTc hsc_env ExtCoreFile False this_mod $ do {
-
- let { ldecls = map noLoc decls } ;
-
- -- Bring the type and class decls into scope
- -- ToDo: check that this doesn't need to extract the val binds.
- -- It seems that only the type and class decls need to be in scope below because
- -- (a) tcTyAndClassDecls doesn't need the val binds, and
- -- (b) tcExtCoreBindings doesn't need anything
- -- (in fact, it might not even need to be in the scope of
- -- this tcg_env at all)
- (tc_envs, _bndrs) <- getLocalNonValBinders emptyFsEnv {- no fixity decls -}
- (mkFakeGroup ldecls) ;
- setEnvs tc_envs $ do {
-
- (rn_decls, _fvs) <- checkNoErrs $ rnTyClDecls [] [mkTyClGroup ldecls] ;
- -- The empty list is for extra dependencies coming from .hs-boot files
- -- See Note [Extra dependencies from .hs-boot files] in RnSource
-
- -- Dump trace of renaming part
- rnDump (ppr rn_decls) ;
-
- -- Typecheck them all together so that
- -- any mutually recursive types are done right
- -- Just discard the auxiliary bindings; they are generated
- -- only for Haskell source code, and should already be in Core
- tcg_env <- tcTyAndClassDecls emptyModDetails rn_decls ;
- safe_mode <- liftIO $ finalSafeMode (hsc_dflags hsc_env) tcg_env ;
- dep_files <- liftIO $ readIORef (tcg_dependent_files tcg_env) ;
-
- setGblEnv tcg_env $ do {
- -- Make the new type env available to stuff slurped from interface files
-
- -- Now the core bindings
- core_binds <- initIfaceExtCore (tcExtCoreBindings src_binds) ;
-
-
- -- Wrap up
- let {
- bndrs = bindersOfBinds core_binds ;
- my_exports = map (Avail . idName) bndrs ;
- -- ToDo: export the data types also?
-
- mod_guts = ModGuts { mg_module = this_mod,
- mg_boot = False,
- mg_used_names = emptyNameSet, -- ToDo: compute usage
- mg_used_th = False,
- mg_dir_imps = emptyModuleEnv, -- ??
- mg_deps = noDependencies, -- ??
- mg_exports = my_exports,
- mg_tcs = tcg_tcs tcg_env,
- mg_insts = tcg_insts tcg_env,
- mg_fam_insts = tcg_fam_insts tcg_env,
- mg_inst_env = tcg_inst_env tcg_env,
- mg_fam_inst_env = tcg_fam_inst_env tcg_env,
- mg_patsyns = [], -- TODO
- mg_rules = [],
- mg_vect_decls = [],
- mg_anns = [],
- mg_binds = core_binds,
-
- -- Stubs
- mg_rdr_env = emptyGlobalRdrEnv,
- mg_fix_env = emptyFixityEnv,
- mg_warns = NoWarnings,
- mg_foreign = NoStubs,
- mg_hpc_info = emptyHpcInfo False,
- mg_modBreaks = emptyModBreaks,
- mg_vect_info = noVectInfo,
- mg_safe_haskell = safe_mode,
- mg_trust_pkg = False,
- mg_dependent_files = dep_files
- } } ;
-
- tcCoreDump mod_guts ;
-
- return mod_guts
- }}}}
-
-mkFakeGroup :: [LTyClDecl a] -> HsGroup a
-mkFakeGroup decls -- Rather clumsy; lots of unused fields
- = emptyRdrGroup { hs_tyclds = [mkTyClGroup decls] }
-\end{code}
-
-
%************************************************************************
%* *
Type-checking the top level of a module
@@ -648,12 +545,35 @@ checkHiBootIface
tcg_env@(TcGblEnv { tcg_src = hs_src, tcg_binds = binds,
tcg_insts = local_insts,
tcg_type_env = local_type_env, tcg_exports = local_exports })
- (ModDetails { md_insts = boot_insts, md_fam_insts = boot_fam_insts,
- md_types = boot_type_env, md_exports = boot_exports })
+ boot_details
| isHsBoot hs_src -- Current module is already a hs-boot file!
= return tcg_env
| otherwise
+ = do { mb_dfun_prs <- checkHiBootIface' local_insts local_type_env
+ local_exports boot_details
+ ; let dfun_prs = catMaybes mb_dfun_prs
+ boot_dfuns = map fst dfun_prs
+ dfun_binds = listToBag [ mkVarBind boot_dfun (nlHsVar dfun)
+ | (boot_dfun, dfun) <- dfun_prs ]
+ type_env' = extendTypeEnvWithIds local_type_env boot_dfuns
+ tcg_env' = tcg_env { tcg_binds = binds `unionBags` dfun_binds }
+
+ ; setGlobalTypeEnv tcg_env' type_env' }
+ -- Update the global type env *including* the knot-tied one
+ -- so that if the source module reads in an interface unfolding
+ -- mentioning one of the dfuns from the boot module, then it
+ -- can "see" that boot dfun. See Trac #4003
+
+checkHiBootIface' :: [ClsInst] -> TypeEnv -> [AvailInfo]
+ -> ModDetails -> TcM [Maybe (Id, Id)]
+-- Variant which doesn't require a full TcGblEnv; you could get the
+-- local components from another ModDetails.
+
+checkHiBootIface'
+ local_insts local_type_env local_exports
+ (ModDetails { md_insts = boot_insts, md_fam_insts = boot_fam_insts,
+ md_types = boot_type_env, md_exports = boot_exports })
= do { traceTc "checkHiBootIface" $ vcat
[ ppr boot_type_env, ppr boot_insts, ppr boot_exports]
@@ -670,19 +590,11 @@ checkHiBootIface
-- Check instance declarations
; mb_dfun_prs <- mapM check_inst boot_insts
- ; let dfun_prs = catMaybes mb_dfun_prs
- boot_dfuns = map fst dfun_prs
- dfun_binds = listToBag [ (Generated, mkVarBind boot_dfun (nlHsVar dfun))
- | (boot_dfun, dfun) <- dfun_prs ]
- type_env' = extendTypeEnvWithIds local_type_env boot_dfuns
- tcg_env' = tcg_env { tcg_binds = binds `unionBags` dfun_binds }
; failIfErrsM
- ; setGlobalTypeEnv tcg_env' type_env' }
- -- Update the global type env *including* the knot-tied one
- -- so that if the source module reads in an interface unfolding
- -- mentioning one of the dfuns from the boot module, then it
- -- can "see" that boot dfun. See Trac #4003
+
+ ; return mb_dfun_prs }
+
where
check_export boot_avail -- boot_avail is exported by the boot iface
| name `elem` dfun_names = return ()
@@ -736,7 +648,7 @@ checkHiBootIface
where
boot_dfun = instanceDFunId boot_inst
boot_inst_ty = idType boot_dfun
- local_boot_dfun = Id.mkExportedLocalId (idName boot_dfun) boot_inst_ty
+ local_boot_dfun = Id.mkExportedLocalId VanillaId (idName boot_dfun) boot_inst_ty
-- This has to compare the TyThing from the .hi-boot file to the TyThing
@@ -784,17 +696,14 @@ checkBootTyCon tc1 tc2
(_, rho_ty2) = splitForAllTys (idType id2)
op_ty2 = funResultTy rho_ty2
- eqAT (tc1, def_ats1) (tc2, def_ats2)
+ eqAT (ATI tc1 def_ats1) (ATI tc2 def_ats2)
= checkBootTyCon tc1 tc2 &&
- eqListBy eqATDef def_ats1 def_ats2
+ eqATDef def_ats1 def_ats2
-- Ignore the location of the defaults
- eqATDef (CoAxBranch { cab_tvs = tvs1, cab_lhs = ty_pats1, cab_rhs = ty1 })
- (CoAxBranch { cab_tvs = tvs2, cab_lhs = ty_pats2, cab_rhs = ty2 })
- | Just env <- eqTyVarBndrs emptyRnEnv2 tvs1 tvs2
- = eqListBy (eqTypeX env) ty_pats1 ty_pats2 &&
- eqTypeX env ty1 ty2
- | otherwise = False
+ eqATDef Nothing Nothing = True
+ eqATDef (Just ty1) (Just ty2) = eqTypeX env ty1 ty2
+ eqATDef _ _ = False
eqFD (as1,bs1) (as2,bs2) =
eqListBy (eqTypeX env) (mkTyVarTys as1) (mkTyVarTys as2) &&
@@ -938,218 +847,6 @@ rnTopSrcDecls extra_deps group
\end{code}
-%************************************************************************
-%* *
- AMP warnings
- The functions defined here issue warnings according to
- the 2013 Applicative-Monad proposal. (Trac #8004)
-%* *
-%************************************************************************
-
-Note [No AMP warning with NoImplicitPrelude]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If you have -XNoImplicitPrelude, then we suppress the AMP warnings.
-The AMP warnings need access to Monad, Applicative, etc, and they
-are defined in 'base'. If, when compiling package 'ghc-prim' (say),
-you try to load Monad (from 'base'), chaos results because 'base'
-depends on 'ghc-prim'. See Note [Home module load error] in LoadIface,
-and Trac #8320.
-
-Using -XNoImplicitPrelude is a proxy for ensuring that all the
-'base' modules are below the home module in the dependency tree.
-
-\begin{code}
--- | Main entry point for generating AMP warnings
-tcAmpWarn :: TcM ()
-tcAmpWarn =
- do { implicit_prel <- xoptM Opt_ImplicitPrelude
- ; warnFlag <- woptM Opt_WarnAMP
- ; when (warnFlag && implicit_prel) $ do {
- -- See Note [No AMP warning with NoImplicitPrelude]
-
- -- Monad without Applicative
- ; tcAmpMissingParentClassWarn monadClassName
- applicativeClassName
-
- -- MonadPlus without Alternative
- ; tcAmpMissingParentClassWarn monadPlusClassName
- alternativeClassName
-
- -- Custom local definitions of join/pure/<*>
- ; mapM_ tcAmpFunctionWarn [joinMName, apAName, pureAName]
- }}
-
-
-
--- | Warn on local definitions of names that would clash with Prelude versions,
--- i.e. join/pure/<*>
---
--- A name clashes if the following criteria are met:
--- 1. It would is imported (unqualified) from Prelude
--- 2. It is locally defined in the current module
--- 3. It has the same literal name as the reference function
--- 4. It is not identical to the reference function
-tcAmpFunctionWarn :: Name -- ^ Name to check, e.g. joinMName for join
- -> TcM ()
-tcAmpFunctionWarn name = do
- { traceTc "tcAmpFunctionWarn/wouldBeImported" empty
- -- Is the name imported (unqualified) from Prelude? (Point 4 above)
- ; rnImports <- fmap (map unLoc . tcg_rn_imports) getGblEnv
- -- (Note that this automatically handles -XNoImplicitPrelude, as Prelude
- -- will not appear in rnImports automatically if it is set.)
-
- -- Continue only the name is imported from Prelude
- ; when (tcAmpImportViaPrelude name rnImports) $ do
- -- Handle 2.-4.
- { rdrElts <- fmap (concat . occEnvElts . tcg_rdr_env) getGblEnv
-
- ; let clashes :: GlobalRdrElt -> Bool
- clashes x = and [ gre_prov x == LocalDef
- , nameOccName (gre_name x) == nameOccName name
- , gre_name x /= name
- ]
-
- -- List of all offending definitions
- clashingElts :: [GlobalRdrElt]
- clashingElts = filter clashes rdrElts
-
- ; traceTc "tcAmpFunctionWarn/amp_prelude_functions"
- (hang (ppr name) 4 (sep [ppr clashingElts]))
-
- ; let warn_msg x = addWarnAt (nameSrcSpan $ gre_name x) . hsep $
- [ ptext (sLit "Local definition of")
- , quotes . ppr . nameOccName $ gre_name x
- , ptext (sLit "clashes with a future Prelude name")
- , ptext (sLit "- this will become an error in GHC 7.10,")
- , ptext (sLit "under the Applicative-Monad Proposal.")
- ]
- ; mapM_ warn_msg clashingElts
- }}
-
--- | Is the given name imported via Prelude?
---
--- This function makes sure that e.g. "import Prelude (map)" should silence
--- AMP warnings about "join" even when they are locally defined.
---
--- Possible scenarios:
--- a) Prelude is imported implicitly, issue warnings.
--- b) Prelude is imported explicitly, but without mentioning the name in
--- question. Issue no warnings.
--- c) Prelude is imported hiding the name in question. Issue no warnings.
--- d) Qualified import of Prelude, no warnings.
-tcAmpImportViaPrelude :: Name
- -> [ImportDecl Name]
- -> Bool
-tcAmpImportViaPrelude name = any importViaPrelude
- where
- isPrelude :: ImportDecl Name -> Bool
- isPrelude imp = unLoc (ideclName imp) == pRELUDE_NAME
-
- -- Implicit (Prelude) import?
- isImplicit :: ImportDecl Name -> Bool
- isImplicit = ideclImplicit
-
- -- Unqualified import?
- isUnqualified :: ImportDecl Name -> Bool
- isUnqualified = not . ideclQualified
-
- second :: (a -> b) -> (x, a) -> (x, b)
- second f (x, y) = (x, f y)
-
- -- List of explicitly imported (or hidden) Names from a single import.
- -- Nothing -> No explicit imports
- -- Just (False, ) -> Explicit import list of
- -- Just (True , ) -> Explicit hiding of
- importList :: ImportDecl Name -> Maybe (Bool, [Name])
- importList = fmap (second (map (ieName . unLoc))) . ideclHiding
-
- -- Check whether the given name would be imported (unqualified) from
- -- an import declaration.
- importViaPrelude :: ImportDecl Name -> Bool
- importViaPrelude x = isPrelude x && isUnqualified x && or [
- -- Whole Prelude imported -> potential clash
- isImplicit x
- -- Explicit import/hiding list, if applicable
- , case importList x of
- Just (False, explicit) -> nameOccName name `elem` map nameOccName explicit
- Just (True , hidden ) -> nameOccName name `notElem` map nameOccName hidden
- Nothing -> False
- ]
-
--- | Issue a warning for instance definitions lacking a should-be parent class.
--- Used for Monad without Applicative and MonadPlus without Alternative.
-tcAmpMissingParentClassWarn :: Name -- ^ Class instance is defined for
- -> Name -- ^ Class it should also be instance of
- -> TcM ()
-
--- Notation: is* is for classes the type is an instance of, should* for those
--- that it should also be an instance of based on the corresponding
--- is*.
--- Example: in case of Applicative/Monad: is = Monad,
--- should = Applicative
-tcAmpMissingParentClassWarn isName shouldName
- = do { isClass' <- tcLookupClass_maybe isName
- ; shouldClass' <- tcLookupClass_maybe shouldName
- ; case (isClass', shouldClass') of
- (Just isClass, Just shouldClass) -> do
- { localInstances <- tcGetInsts
- ; let isInstance m = is_cls m == isClass
- isInsts = filter isInstance localInstances
- ; traceTc "tcAmpMissingParentClassWarn/isInsts" (ppr isInsts)
- ; forM_ isInsts $ checkShouldInst isClass shouldClass
- }
- _ -> return ()
- }
- where
- -- Checks whether the desired superclass exists in a given environment.
- checkShouldInst :: Class -- ^ Class of existing instance
- -> Class -- ^ Class there should be an instance of
- -> ClsInst -- ^ Existing instance
- -> TcM ()
- checkShouldInst isClass shouldClass isInst
- = do { instEnv <- tcGetInstEnvs
- ; let (instanceMatches, shouldInsts, _)
- = lookupInstEnv instEnv shouldClass (is_tys isInst)
-
- ; traceTc "tcAmpMissingParentClassWarn/checkShouldInst"
- (hang (ppr isInst) 4
- (sep [ppr instanceMatches, ppr shouldInsts]))
-
- -- ": Warning: is an instance of but not "
- -- e.g. "Foo is an instance of Monad but not Applicative"
- ; let instLoc = srcLocSpan . nameSrcLoc $ getName isInst
- warnMsg (Just name:_) =
- addWarnAt instLoc . hsep $
- [ quotes (ppr $ nameOccName name)
- , ptext (sLit "is an instance of")
- , ppr . nameOccName $ className isClass
- , ptext (sLit "but not")
- , ppr . nameOccName $ className shouldClass
- , ptext (sLit "- this will become an error in GHC 7.10,")
- , ptext (sLit "under the Applicative-Monad Proposal.")
- ]
- warnMsg _ = return ()
- ; when (null shouldInsts && null instanceMatches) $
- warnMsg (is_tcs isInst)
- }
-
-
--- | Looks up a class, returning Nothing on failure. Similar to
--- TcEnv.tcLookupClass, but does not issue any error messages.
---
--- In particular, it may be called by the AMP check on, say,
--- Control.Applicative.Applicative, well before Control.Applicative
--- has been compiled. In this case we just return Nothing, and the
--- AMP test is silently dropped.
-tcLookupClass_maybe :: Name -> TcM (Maybe Class)
-tcLookupClass_maybe name
- = do { mb_thing <- tcLookupImported_maybe name
- ; case mb_thing of
- Succeeded (ATyCon tc) | Just cls <- tyConClass_maybe tc -> return (Just cls)
- _ -> return Nothing }
-\end{code}
-
-
%************************************************************************
%* *
tcTopSrcDecls
@@ -1183,7 +880,6 @@ tcTopSrcDecls boot_details
-- Generate Applicative/Monad proposal (AMP) warnings
traceTc "Tc3b" empty ;
- tcAmpWarn ;
-- Foreign import declarations next.
traceTc "Tc4" empty ;
@@ -1362,7 +1058,7 @@ check_main dflags tcg_env
; let { root_main_name = mkExternalName rootMainKey rOOT_MAIN
(mkVarOccFS (fsLit "main"))
(getSrcSpan main_name)
- ; root_main_id = Id.mkExportedLocalId root_main_name
+ ; root_main_id = Id.mkExportedLocalId VanillaId root_main_name
(mkTyConApp ioTyCon [res_ty])
; co = mkWpTyApps [res_ty]
; rhs = nlHsApp (mkLHsWrap co (nlHsVar run_main_id)) main_expr
@@ -1370,7 +1066,7 @@ check_main dflags tcg_env
; return (tcg_env { tcg_main = Just main_name,
tcg_binds = tcg_binds tcg_env
- `snocBag` (Generated, main_bind),
+ `snocBag` main_bind,
tcg_dus = tcg_dus tcg_env
`plusDU` usesOnly (unitFV main_name)
-- Record the use of 'main', so that we don't
@@ -1605,14 +1301,14 @@ tcUserStmt (L loc (BodyStmt expr _ _ _))
; let fresh_it = itName uniq loc
matches = [mkMatch [] rn_expr emptyLocalBinds]
-- [it = expr]
- the_bind = L loc $ (mkTopFunBind (L loc fresh_it) matches) { bind_fvs = fvs }
+ the_bind = L loc $ (mkTopFunBind FromSource (L loc fresh_it) matches) { bind_fvs = fvs }
-- Care here! In GHCi the expression might have
-- free variables, and they in turn may have free type variables
-- (if we are at a breakpoint, say). We must put those free vars
-- [let it = expr]
let_stmt = L loc $ LetStmt $ HsValBinds $
- ValBindsOut [(NonRecursive,unitBag (FromSource, the_bind))] []
+ ValBindsOut [(NonRecursive,unitBag the_bind)] []
-- [it <- e]
bind_stmt = L loc $ BindStmt (L loc (VarPat fresh_it))
@@ -2042,7 +1738,7 @@ loadUnqualIfaces hsc_env ictxt
, let name = gre_name gre
, not (isInternalName name)
, let mod = nameModule name
- , not (modulePackageId mod == this_pkg || isInteractiveModule mod)
+ , not (modulePackageKey mod == this_pkg || isInteractiveModule mod)
-- Don't attempt to load an interface for stuff
-- from the command line, or from the home package
, isTcOcc (nameOccName name) -- Types and classes only
@@ -2078,17 +1774,6 @@ tcDump env
-- NB: foreign x-d's have undefined's in their types;
-- hence can't show the tc_fords
-tcCoreDump :: ModGuts -> TcM ()
-tcCoreDump mod_guts
- = do { dflags <- getDynFlags ;
- when (dopt Opt_D_dump_types dflags || dopt Opt_D_dump_tc dflags)
- (dumpTcRn (pprModGuts mod_guts)) ;
-
- -- Dump bindings if -ddump-tc
- dumpOptTcRn Opt_D_dump_tc (mkDumpDoc "Typechecker" full_dump) }
- where
- full_dump = pprCoreBindings (mg_binds mod_guts)
-
-- It's unpleasant having both pprModGuts and pprModDetails here
pprTcGblEnv :: TcGblEnv -> SDoc
pprTcGblEnv (TcGblEnv { tcg_type_env = type_env,
@@ -2106,7 +1791,7 @@ pprTcGblEnv (TcGblEnv { tcg_type_env = type_env,
, ptext (sLit "Dependent modules:") <+>
ppr (sortBy cmp_mp $ eltsUFM (imp_dep_mods imports))
, ptext (sLit "Dependent packages:") <+>
- ppr (sortBy stablePackageIdCmp $ imp_dep_pkgs imports)]
+ ppr (sortBy stablePackageKeyCmp $ imp_dep_pkgs imports)]
where -- The two uses of sortBy are just to reduce unnecessary
-- wobbling in testsuite output
cmp_mp (mod_name1, is_boot1) (mod_name2, is_boot2)
@@ -2114,12 +1799,6 @@ pprTcGblEnv (TcGblEnv { tcg_type_env = type_env,
`thenCmp`
(is_boot1 `compare` is_boot2)
-pprModGuts :: ModGuts -> SDoc
-pprModGuts (ModGuts { mg_tcs = tcs
- , mg_rules = rules })
- = vcat [ ppr_types [] (mkTypeEnv (map ATyCon tcs)),
- ppr_rules rules ]
-
ppr_types :: [ClsInst] -> TypeEnv -> SDoc
ppr_types insts type_env
= text "TYPE SIGNATURES" $$ nest 2 (ppr_sigs ids)
@@ -2170,13 +1849,5 @@ ppr_tydecls tycons
-- Print type constructor info; sort by OccName
= vcat (map ppr_tycon (sortBy (comparing getOccName) tycons))
where
- ppr_tycon tycon = vcat [ ppr (tyConName tycon) <+> dcolon <+> ppr (tyConKind tycon)
- -- Temporarily print the kind signature too
- , ppr (tyThingToIfaceDecl (ATyCon tycon)) ]
-
-ppr_rules :: [CoreRule] -> SDoc
-ppr_rules [] = empty
-ppr_rules rs = vcat [ptext (sLit "{-# RULES"),
- nest 2 (pprRules rs),
- ptext (sLit "#-}")]
+ ppr_tycon tycon = vcat [ ppr (tyThingToIfaceDecl (ATyCon tycon)) ]
\end{code}
diff --git a/compiler/typecheck/TcRnMonad.lhs b/compiler/typecheck/TcRnMonad.lhs
index b3d37f61780e..9dbc4206a551 100644
--- a/compiler/typecheck/TcRnMonad.lhs
+++ b/compiler/typecheck/TcRnMonad.lhs
@@ -5,7 +5,9 @@
Functions for working with the typechecker environment (setters, getters...).
\begin{code}
+{-# LANGUAGE CPP, ExplicitForAll, FlexibleInstances #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
+
module TcRnMonad(
module TcRnMonad,
module TcRnTypes,
@@ -24,7 +26,6 @@ import Module
import RdrName
import Name
import Type
-import Kind ( isSuperKind )
import TcType
import InstEnv
@@ -49,7 +50,7 @@ import FastString
import Panic
import Util
import Annotations
-import BasicTypes( TopLevelFlag, Origin )
+import BasicTypes( TopLevelFlag )
import Control.Exception
import Data.IORef
@@ -588,11 +589,6 @@ addLocM fn (L loc a) = setSrcSpan loc $ fn a
wrapLocM :: (a -> TcM b) -> Located a -> TcM (Located b)
wrapLocM fn (L loc a) = setSrcSpan loc $ do b <- fn a; return (L loc b)
-wrapOriginLocM :: (a -> TcM r) -> (Origin, Located a) -> TcM (Origin, Located r)
-wrapOriginLocM fn (origin, lbind)
- = do { lbind' <- wrapLocM fn lbind
- ; return (origin, lbind') }
-
wrapLocFstM :: (a -> TcM (b,c)) -> Located a -> TcM (Located b, c)
wrapLocFstM fn (L loc a) =
setSrcSpan loc $ do
@@ -1136,10 +1132,6 @@ setUntouchables untch thing_inside
isTouchableTcM :: TcTyVar -> TcM Bool
isTouchableTcM tv
- -- Kind variables are always touchable
- | isSuperKind (tyVarKind tv)
- = return False
- | otherwise
= do { env <- getLclEnv
; return (isTouchableMetaTyVar (tcl_untch env) tv) }
@@ -1213,9 +1205,10 @@ recordUnsafeInfer = getGblEnv >>= \env -> writeTcRef (tcg_safeInfer env) False
finalSafeMode :: DynFlags -> TcGblEnv -> IO SafeHaskellMode
finalSafeMode dflags tcg_env = do
safeInf <- readIORef (tcg_safeInfer tcg_env)
- return $ if safeInferOn dflags && not safeInf
- then Sf_None
- else safeHaskell dflags
+ return $ case safeHaskell dflags of
+ Sf_None | safeInferOn dflags && safeInf -> Sf_Safe
+ | otherwise -> Sf_None
+ s -> s
\end{code}
@@ -1255,17 +1248,6 @@ initIfaceTcRn thing_inside
; get_type_env = readTcRef (tcg_type_env_var tcg_env) }
; setEnvs (if_env, ()) thing_inside }
-initIfaceExtCore :: IfL a -> TcRn a
-initIfaceExtCore thing_inside
- = do { tcg_env <- getGblEnv
- ; let { mod = tcg_mod tcg_env
- ; doc = ptext (sLit "External Core file for") <+> quotes (ppr mod)
- ; if_env = IfGblEnv {
- if_rec_types = Just (mod, return (tcg_type_env tcg_env)) }
- ; if_lenv = mkIfLclEnv mod doc
- }
- ; setEnvs (if_env, if_lenv) thing_inside }
-
initIfaceCheck :: HscEnv -> IfG a -> IO a
-- Used when checking the up-to-date-ness of the old Iface
-- Initialise the environment with no useful info at all
diff --git a/compiler/typecheck/TcRnTypes.lhs b/compiler/typecheck/TcRnTypes.lhs
index 44dc3faa1e67..f46bdfd2d9b5 100644
--- a/compiler/typecheck/TcRnTypes.lhs
+++ b/compiler/typecheck/TcRnTypes.lhs
@@ -16,6 +16,8 @@ For state that is global and should be returned at the end (e.g not part
of the stack mechanism), you should use an TcRef (= IORef) to store them.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcRnTypes(
TcRnIf, TcRn, TcM, RnM, IfM, IfL, IfG, -- The monad is opaque outside this module
TcRef,
@@ -92,7 +94,7 @@ import Class ( Class )
import TyCon ( TyCon )
import ConLike ( ConLike(..) )
import DataCon ( DataCon, dataConUserType, dataConOrigArgTys )
-import PatSyn ( PatSyn, patSynId )
+import PatSyn ( PatSyn, patSynType )
import TcType
import Annotations
import InstEnv
@@ -294,7 +296,7 @@ data TcGblEnv
-- ^ Allows us to choose unique DFun names.
-- The next fields accumulate the payload of the module
- -- The binds, rules and foreign-decl fiels are collected
+ -- The binds, rules and foreign-decl fields are collected
-- initially in un-zonked form and are finally zonked in tcRnSrcDecls
tcg_rn_exports :: Maybe [Located (IE Name)],
@@ -323,6 +325,9 @@ data TcGblEnv
#endif /* GHCI */
tcg_ev_binds :: Bag EvBind, -- Top-level evidence bindings
+
+ -- Things defined in this module, or (in GHCi) in the interactive package
+ -- For the latter, see Note [The interactive package] in HscTypes
tcg_binds :: LHsBinds Id, -- Value bindings in this module
tcg_sigs :: NameSet, -- ...Top-level names that *lack* a signature
tcg_imp_specs :: [LTcSpecPrag], -- ...SPECIALISE prags for imported Ids
@@ -804,17 +809,17 @@ data ImportAvails
-- compiling M might not need to consult X.hi, but X
-- is still listed in M's dependencies.
- imp_dep_pkgs :: [PackageId],
+ imp_dep_pkgs :: [PackageKey],
-- ^ Packages needed by the module being compiled, whether directly,
-- or via other modules in this package, or via modules imported
-- from other packages.
- imp_trust_pkgs :: [PackageId],
+ imp_trust_pkgs :: [PackageKey],
-- ^ This is strictly a subset of imp_dep_pkgs and records the
-- packages the current module needs to trust for Safe Haskell
-- compilation to succeed. A package is required to be trusted if
-- we are dependent on a trustworthy module in that package.
- -- While perhaps making imp_dep_pkgs a tuple of (PackageId, Bool)
+ -- While perhaps making imp_dep_pkgs a tuple of (PackageKey, Bool)
-- where True for the bool indicates the package is required to be
-- trusted is the more logical design, doing so complicates a lot
-- of code not concerned with Safe Haskell.
@@ -1282,6 +1287,8 @@ data Implication
ic_fsks :: [TcTyVar], -- Extra flatten-skolems introduced by
-- by flattening the givens
+ -- See Note [Given flatten-skolems]
+
ic_no_eqs :: Bool, -- True <=> ic_givens have no equalities, for sure
-- False <=> ic_givens might have equalities
@@ -1741,11 +1748,14 @@ pprSkolInfo ArrowSkol = ptext (sLit "the arrow form")
pprSkolInfo (PatSkol cl mc) = case cl of
RealDataCon dc -> sep [ ptext (sLit "a pattern with constructor")
, nest 2 $ ppr dc <+> dcolon
- <+> ppr (dataConUserType dc) <> comma
+ <+> pprType (dataConUserType dc) <> comma
+ -- pprType prints forall's regardless of -fprint-explict-foralls
+ -- which is what we want here, since we might be saying
+ -- type variable 't' is bound by ...
, ptext (sLit "in") <+> pprMatchContext mc ]
PatSynCon ps -> sep [ ptext (sLit "a pattern with pattern synonym")
, nest 2 $ ppr ps <+> dcolon
- <+> ppr (varType (patSynId ps)) <> comma
+ <+> pprType (patSynType ps) <> comma
, ptext (sLit "in") <+> pprMatchContext mc ]
pprSkolInfo (InferSkol ids) = sep [ ptext (sLit "the inferred type of")
, vcat [ ppr name <+> dcolon <+> ppr ty
@@ -1850,9 +1860,9 @@ pprO (DerivOriginDC dc n) = hsep [ ptext (sLit "the"), speakNth n,
parens (ptext (sLit "type") <+> quotes (ppr ty)) ]
where ty = dataConOrigArgTys dc !! (n-1)
pprO (DerivOriginCoerce meth ty1 ty2)
- = fsep [ ptext (sLit "the coercion"), ptext (sLit "of the method")
- , quotes (ppr meth), ptext (sLit "from type"), quotes (ppr ty1)
- , ptext (sLit "to type"), quotes (ppr ty2) ]
+ = sep [ ptext (sLit "the coercion of the method") <+> quotes (ppr meth)
+ , ptext (sLit "from type") <+> quotes (ppr ty1)
+ , nest 2 (ptext (sLit "to type") <+> quotes (ppr ty2)) ]
pprO StandAloneDerivOrigin = ptext (sLit "a 'deriving' declaration")
pprO DefaultOrigin = ptext (sLit "a 'default' declaration")
pprO DoOrigin = ptext (sLit "a do statement")
diff --git a/compiler/typecheck/TcRules.lhs b/compiler/typecheck/TcRules.lhs
index c2f3b6b30237..47b38f114bcc 100644
--- a/compiler/typecheck/TcRules.lhs
+++ b/compiler/typecheck/TcRules.lhs
@@ -6,7 +6,7 @@
TcRules: Typechecking transformation rules
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
diff --git a/compiler/typecheck/TcSMonad.lhs b/compiler/typecheck/TcSMonad.lhs
index 634e926a5ee9..9891f777950b 100644
--- a/compiler/typecheck/TcSMonad.lhs
+++ b/compiler/typecheck/TcSMonad.lhs
@@ -1,4 +1,6 @@
\begin{code}
+{-# LANGUAGE CPP, TypeFamilies #-}
+
-- Type definitions for the constraint solver
module TcSMonad (
@@ -134,7 +136,7 @@ import TcRnTypes
import BasicTypes
import Unique
import UniqFM
-import Maybes ( orElse, catMaybes, firstJust )
+import Maybes ( orElse, catMaybes, firstJusts )
import Pair ( pSnd )
import TrieMap
@@ -457,6 +459,7 @@ data InertSet
, inert_fsks :: [TcTyVar] -- Rigid flatten-skolems (arising from givens)
-- allocated in this local scope
+ -- See Note [Given flatten-skolems]
, inert_solved_funeqs :: FunEqMap (CtEvidence, TcType)
-- See Note [Type family equations]
@@ -474,8 +477,29 @@ data InertSet
-- - Stored not necessarily as fully rewritten
-- (ToDo: rewrite lazily when we lookup)
}
+\end{code}
+Note [Given flatten-skolems]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Suppose we simplify the implication
+ forall b. C (F a) b => (C (F a) beta, blah)
+We'll flatten the givens, introducing a flatten-skolem, so the
+givens effectively look like
+ (C fsk b, F a ~ fsk)
+Then we simplify the wanteds, transforming (C (F a) beta) to (C fsk beta).
+Now, if we don't solve that wanted, we'll put it back into the residual
+implication. But where is fsk bound?
+
+We solve this by recording the given flatten-skolems in the implication
+(the ic_fsks field), so it's as if we change the implication to
+ forall b, fsk. (C fsk b, F a ~ fsk) => (C fsk beta, blah)
+
+We don't need to explicitly record the (F a ~ fsk) constraint in the implication
+because we can recover it from inside the fsk TyVar itself. But we do need
+to treat that (F a ~ fsk) as a new given. See the fsk_bag stuff in
+TcInteract.solveInteractGiven.
+\begin{code}
instance Outputable InertCans where
ppr ics = vcat [ ptext (sLit "Equalities:")
<+> vcat (map ppr (varEnvElts (inert_eqs ics)))
@@ -502,9 +526,9 @@ emptyInert
, inert_funeqs = emptyFunEqs
, inert_irreds = emptyCts
, inert_insols = emptyCts
- , inert_no_eqs = True
+ , inert_no_eqs = True -- See Note [inert_fsks and inert_no_eqs]
}
- , inert_fsks = []
+ , inert_fsks = [] -- See Note [inert_fsks and inert_no_eqs]
, inert_flat_cache = emptyFunEqs
, inert_solved_funeqs = emptyFunEqs
, inert_solved_dicts = emptyDictMap }
@@ -517,10 +541,12 @@ addInertCan ics item@(CTyEqCan { cc_ev = ev })
(inert_eqs ics)
(cc_tyvar item) [item]
, inert_no_eqs = isFlatSkolEv ev && inert_no_eqs ics }
+ -- See Note [When does an implication have given equalities?] in TcSimplify
addInertCan ics item@(CFunEqCan { cc_fun = tc, cc_tyargs = tys, cc_ev = ev })
= ics { inert_funeqs = addFunEq (inert_funeqs ics) tc tys item
, inert_no_eqs = isFlatSkolEv ev && inert_no_eqs ics }
+ -- See Note [When does an implication have given equalities?] in TcSimplify
addInertCan ics item@(CIrredEvCan {})
= ics { inert_irreds = inert_irreds ics `Bag.snocBag` item
@@ -597,7 +623,7 @@ prepareInertsForImplications is
, inert_irreds = Bag.filterBag isGivenCt irreds
, inert_dicts = filterDicts isGivenCt dicts
, inert_insols = emptyCts
- , inert_no_eqs = True -- Ready for each implication
+ , inert_no_eqs = True -- See Note [inert_fsks and inert_no_eqs]
}
is_given_eq :: [Ct] -> Bool
@@ -723,9 +749,9 @@ lookupFlatEqn fam_tc tys
= do { IS { inert_solved_funeqs = solved_funeqs
, inert_flat_cache = flat_cache
, inert_cans = IC { inert_funeqs = inert_funeqs } } <- getTcSInerts
- ; return (findFunEq solved_funeqs fam_tc tys `firstJust`
- lookup_inerts inert_funeqs `firstJust`
- findFunEq flat_cache fam_tc tys) }
+ ; return (firstJusts [findFunEq solved_funeqs fam_tc tys,
+ lookup_inerts inert_funeqs,
+ findFunEq flat_cache fam_tc tys]) }
where
lookup_inerts inert_funeqs
| (ct:_) <- findFunEqs inert_funeqs fam_tc tys
@@ -1121,8 +1147,8 @@ nestImplicTcS ref inner_untch inerts (TcS thing_inside)
, tcs_ty_binds = ty_binds
, tcs_count = count
, tcs_inerts = new_inert_var
- , tcs_worklist = panic "nextImplicTcS: worklist"
- , tcs_implics = panic "nextImplicTcS: implics"
+ , tcs_worklist = panic "nestImplicTcS: worklist"
+ , tcs_implics = panic "nestImplicTcS: implics"
-- NB: Both these are initialised by withWorkList
}
; res <- TcM.setUntouchables inner_untch $
@@ -1150,8 +1176,8 @@ nestTcS (TcS thing_inside)
do { inerts <- TcM.readTcRef inerts_var
; new_inert_var <- TcM.newTcRef inerts
; let nest_env = env { tcs_inerts = new_inert_var
- , tcs_worklist = panic "nextImplicTcS: worklist"
- , tcs_implics = panic "nextImplicTcS: implics" }
+ , tcs_worklist = panic "nestTcS: worklist"
+ , tcs_implics = panic "nestTcS: implics" }
; thing_inside nest_env }
tryTcS :: TcS a -> TcS a
@@ -1169,8 +1195,8 @@ tryTcS (TcS thing_inside)
; let nest_env = env { tcs_ev_binds = ev_binds_var
, tcs_ty_binds = ty_binds_var
, tcs_inerts = is_var
- , tcs_worklist = panic "nextImplicTcS: worklist"
- , tcs_implics = panic "nextImplicTcS: implics" }
+ , tcs_worklist = panic "tryTcS: worklist"
+ , tcs_implics = panic "tryTcS: implics" }
; thing_inside nest_env }
-- Getters and setters of TcEnv fields
@@ -1253,19 +1279,35 @@ getUntouchables :: TcS Untouchables
getUntouchables = wrapTcS TcM.getUntouchables
getGivenInfo :: TcS a -> TcS (Bool, [TcTyVar], a)
--- Run thing_inside, returning info on
--- a) whether we got any new equalities
--- b) which new (given) flatten skolems were generated
+-- See Note [inert_fsks and inert_no_eqs]
getGivenInfo thing_inside
- = do { updInertTcS reset_vars
- ; res <- thing_inside
- ; is <- getTcSInerts
+ = do { updInertTcS reset_vars -- Set inert_fsks and inert_no_eqs to initial values
+ ; res <- thing_inside -- Run thing_inside
+ ; is <- getTcSInerts -- Get new values of inert_fsks and inert_no_eqs
; return (inert_no_eqs (inert_cans is), inert_fsks is, res) }
where
reset_vars :: InertSet -> InertSet
reset_vars is = is { inert_cans = (inert_cans is) { inert_no_eqs = True }
, inert_fsks = [] }
+\end{code}
+
+Note [inert_fsks and inert_no_eqs]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The function getGivenInfo runs thing_inside to see what new flatten-skolems
+and equalities are generated by thing_inside. To that end,
+ * it initialises inert_fsks, inert_no_eqs
+ * runs thing_inside
+ * reads out inert_fsks, inert_no_eqs
+This is the only place where it matters what inert_fsks and inert_no_eqs
+are initialised to. In other places (eg emptyIntert), we need to set them
+to something (because they are strict) but they will never be looked at.
+
+See Note [When does an implication have given equalities?] in TcSimplify
+for more details about inert_no_eqs.
+See Note [Given flatten-skolems] for more details about inert_fsks.
+
+\begin{code}
getTcSTyBinds :: TcS (IORef (Bool, TyVarEnv (TcTyVar, TcType)))
getTcSTyBinds = TcS (return . tcs_ty_binds)
@@ -1350,7 +1392,7 @@ checkWellStagedDFun pred dfun_id loc
bind_lvl = TcM.topIdLvl dfun_id
pprEq :: TcType -> TcType -> SDoc
-pprEq ty1 ty2 = pprType $ mkEqPred ty1 ty2
+pprEq ty1 ty2 = pprParendType ty1 <+> char '~' <+> pprParendType ty2
isTouchableMetaTyVarTcS :: TcTyVar -> TcS Bool
isTouchableMetaTyVarTcS tv
@@ -1516,6 +1558,8 @@ data XEvTerm
= XEvTerm { ev_preds :: [PredType] -- New predicate types
, ev_comp :: [EvTerm] -> EvTerm -- How to compose evidence
, ev_decomp :: EvTerm -> [EvTerm] -- How to decompose evidence
+ -- In both ev_comp and ev_decomp, the [EvTerm] is 1-1 with ev_preds
+ -- and each EvTerm has type of the corresponding EvPred
}
data MaybeNew = Fresh CtEvidence | Cached EvTerm
@@ -1602,16 +1646,16 @@ Note [xCFlavor]
~~~~~~~~~~~~~~~
A call might look like this:
- xCtFlavor ev subgoal-preds evidence-transformer
+ xCtEvidence ev evidence-transformer
- ev is Given => use ev_decomp to create new Givens for subgoal-preds,
+ ev is Given => use ev_decomp to create new Givens for ev_preds,
and return them
- ev is Wanted => create new wanteds for subgoal-preds,
+ ev is Wanted => create new wanteds for ev_preds,
use ev_comp to bind ev,
return fresh wanteds (ie ones not cached in inert_cans or solved)
- ev is Derived => create new deriveds for subgoal-preds
+ ev is Derived => create new deriveds for ev_preds
(unless cached in inert_cans or solved)
Note: The [CtEvidence] returned is a subset of the subgoal-preds passed in
@@ -1638,14 +1682,10 @@ See Note [Coercion evidence terms] in TcEvidence.
Note [Do not create Given kind equalities]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-We do not want to create a Given like
+We do not want to create a Given kind equality like
- kv ~ k -- kv is a skolem kind variable
- -- Reason we don't yet support non-Refl kind equalities
-
-or t1::k1 ~ t2::k2 -- k1 and k2 are un-equal kinds
- -- Reason: (~) is kind-uniform at the moment, and
- -- k1/k2 may be distinct kind skolems
+ [G] kv ~ k -- kv is a skolem kind variable
+ -- Reason we don't yet support non-Refl kind equalities
This showed up in Trac #8566, where we had a data type
data I (u :: U *) (r :: [*]) :: * where
@@ -1656,16 +1696,26 @@ so A has type
(u ~ AA * k t as) => I u r
There is no direct kind equality, but in a pattern match where 'u' is
-instantiated to, say, (AA * kk t1 as1), we'd decompose to get
+instantiated to, say, (AA * kk (t1:kk) as1), we'd decompose to get
k ~ kk, t ~ t1, as ~ as1
-This is bad. We "fix" this by simply ignoring
- * the Given kind equality
- * AND the Given type equality (t:k1) ~ (t1:kk)
-
+This is bad. We "fix" this by simply ignoring the Given kind equality
But the Right Thing is to add kind equalities!
+But note (Trac #8705) that we *do* create Given (non-canonical) equalities
+with un-equal kinds, e.g.
+ [G] t1::k1 ~ t2::k2 -- k1 and k2 are un-equal kinds
+Reason: k1 or k2 might be unification variables that have already been
+unified (at this point we have not canonicalised the types), so we want
+to emit this t1~t2 as a (non-canonical) Given in the work-list. If k1/k2
+have been unified, we'll find that when we canonicalise it, and the
+t1~t2 information may be crucial (Trac #8705 is an example).
+
+If it turns out that k1 and k2 are really un-equal, then it'll end up
+as an Irreducible (see Note [Equalities with incompatible kinds] in
+TcCanonical), and will do no harm.
+
\begin{code}
-xCtEvidence :: CtEvidence -- Original flavor
+xCtEvidence :: CtEvidence -- Original evidence
-> XEvTerm -- Instructions about how to manipulate evidence
-> TcS [CtEvidence]
@@ -1677,8 +1727,8 @@ xCtEvidence (CtGiven { ctev_evtm = tm, ctev_loc = loc })
where
-- See Note [Do not create Given kind equalities]
bad_given_pred (pred_ty, _)
- | EqPred t1 t2 <- classifyPredType pred_ty
- = isKind t1 || not (typeKind t1 `tcEqKind` typeKind t2)
+ | EqPred t1 _ <- classifyPredType pred_ty
+ = isKind t1
| otherwise
= False
@@ -1718,7 +1768,18 @@ Main purpose: create new evidence for new_pred;
Given Already in inert Nothing
Not Just new_evidence
--}
+
+Note [Rewriting with Refl]
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+If the coercion is just reflexivity then you may re-use the same
+variable. But be careful! Although the coercion is Refl, new_pred
+may reflect the result of unification alpha := ty, so new_pred might
+not _look_ the same as old_pred, and it's vital to proceed from now on
+using new_pred.
+
+The flattener preserves type synonyms, so they should appear in new_pred
+as well as in old_pred; that is important for good error messages.
+ -}
rewriteEvidence (CtDerived { ctev_loc = loc }) new_pred _co
@@ -1732,15 +1793,8 @@ rewriteEvidence (CtDerived { ctev_loc = loc }) new_pred _co
newDerived loc new_pred
rewriteEvidence old_ev new_pred co
- | isTcReflCo co -- If just reflexivity then you may re-use the same variable
- = return (Just (if ctEvPred old_ev `tcEqType` new_pred
- then old_ev
- else old_ev { ctev_pred = new_pred }))
- -- Even if the coercion is Refl, it might reflect the result of unification alpha := ty
- -- so old_pred and new_pred might not *look* the same, and it's vital to proceed from
- -- now on using new_pred.
- -- However, if they *do* look the same, we'd prefer to stick with old_pred
- -- then retain the old type, so that error messages come out mentioning synonyms
+ | isTcReflCo co -- See Note [Rewriting with Refl]
+ = return (Just (old_ev { ctev_pred = new_pred }))
rewriteEvidence (CtGiven { ctev_evtm = old_tm , ctev_loc = loc }) new_pred co
= do { new_ev <- newGivenEvVar loc (new_pred, new_tm) -- See Note [Bind new Givens immediately]
@@ -1780,15 +1834,12 @@ rewriteEqEvidence :: CtEvidence -- Old evidence :: olhs ~ orhs (not swap
-- It's all a form of rewwriteEvidence, specialised for equalities
rewriteEqEvidence old_ev swapped nlhs nrhs lhs_co rhs_co
| CtDerived { ctev_loc = loc } <- old_ev
- = newDerived loc (mkEqPred nlhs nrhs)
+ = newDerived loc (mkTcEqPred nlhs nrhs)
| NotSwapped <- swapped
- , isTcReflCo lhs_co
+ , isTcReflCo lhs_co -- See Note [Rewriting with Refl]
, isTcReflCo rhs_co
- , let new_pred = mkTcEqPred nlhs nrhs
- = return (Just (if ctEvPred old_ev `tcEqType` new_pred
- then old_ev
- else old_ev { ctev_pred = new_pred }))
+ = return (Just (old_ev { ctev_pred = new_pred }))
| CtGiven { ctev_evtm = old_tm , ctev_loc = loc } <- old_ev
= do { let new_tm = EvCoercion (lhs_co
@@ -1810,7 +1861,7 @@ rewriteEqEvidence old_ev swapped nlhs nrhs lhs_co rhs_co
| otherwise
= panic "rewriteEvidence"
where
- new_pred = mkEqPred nlhs nrhs
+ new_pred = mkTcEqPred nlhs nrhs
maybeSym :: SwapFlag -> TcCoercion -> TcCoercion
maybeSym IsSwapped co = mkTcSymCo co
diff --git a/compiler/typecheck/TcSimplify.lhs b/compiler/typecheck/TcSimplify.lhs
index 0fdd2ba3f5c5..dde5902ccc9c 100644
--- a/compiler/typecheck/TcSimplify.lhs
+++ b/compiler/typecheck/TcSimplify.lhs
@@ -1,4 +1,6 @@
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcSimplify(
simplifyInfer, quantifyPred,
simplifyAmbiguityCheck,
@@ -16,7 +18,7 @@ import TcMType as TcM
import TcType
import TcSMonad as TcS
import TcInteract
-import Kind ( defaultKind_maybe )
+import Kind ( isKind, defaultKind_maybe )
import Inst
import FunDeps ( growThetaTyVars )
import Type ( classifyPredType, PredTree(..), getClassPredTys_maybe )
@@ -95,10 +97,9 @@ simpl_top wanteds
try_class_defaulting :: WantedConstraints -> TcS WantedConstraints
try_class_defaulting wc
- | isEmptyWC wc || insolubleWC wc
- = return wc -- Don't do type-class defaulting if there are insolubles
- -- Doing so is not going to solve the insolubles
- | otherwise
+ | isEmptyWC wc
+ = return wc
+ | otherwise -- See Note [When to do type-class defaulting]
= do { something_happened <- applyDefaultingRules (approximateWC wc)
-- See Note [Top-level Defaulting Plan]
; if something_happened
@@ -107,6 +108,33 @@ simpl_top wanteds
else return wc }
\end{code}
+Note [When to do type-class defaulting]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+In GHC 7.6 and 7.8.2, we did type-class defaulting only if insolubleWC
+was false, on the grounds that defaulting can't help solve insoluble
+constraints. But if we *don't* do defaulting we may report a whole
+lot of errors that would be solved by defaulting; these errors are
+quite spurious because fixing the single insoluble error means that
+defaulting happens again, which makes all the other errors go away.
+This is jolly confusing: Trac #9033.
+
+So it seems better to always do type-class defaulting.
+
+However, always doing defaulting does mean that we'll do it in
+situations like this (Trac #5934):
+ run :: (forall s. GenST s) -> Int
+ run = fromInteger 0
+We don't unify the return type of fromInteger with the given function
+type, because the latter involves foralls. So we're left with
+ (Num alpha, alpha ~ (forall s. GenST s) -> Int)
+Now we do defaulting, get alpha := Integer, and report that we can't
+match Integer with (forall s. GenST s) -> Int. That's not totally
+stupid, but perhaps a little strange.
+
+Another potential alternative would be to suppress *all* non-insoluble
+errors if there are *any* insoluble errors, anywhere, but that seems
+too drastic.
+
Note [Must simplify after defaulting]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We may have a deeply buried constraint
@@ -253,39 +281,50 @@ simplifyInfer _top_lvl apply_mr name_taus wanteds
; ev_binds_var <- newTcEvBinds
; wanted_transformed_incl_derivs
<- solveWantedsTcMWithEvBinds ev_binds_var wanteds solve_wanteds
- -- Post: wanted_transformed are zonked
+ -- Post: wanted_transformed_incl_derivs are zonked
-- Step 4) Candidates for quantification are an approximation of wanted_transformed
-- NB: Already the fixpoint of any unifications that may have happened
-- NB: We do not do any defaulting when inferring a type, this can lead
-- to less polymorphic types, see Note [Default while Inferring]
- -- Step 5) Minimize the quantification candidates
- -- Step 6) Final candidates for quantification
- -- We discard bindings, insolubles etc, because all we are
- -- care aout it
-
; tc_lcl_env <- TcRnMonad.getLclEnv
; let untch = tcl_untch tc_lcl_env
wanted_transformed = dropDerivedWC wanted_transformed_incl_derivs
; quant_pred_candidates -- Fully zonked
<- if insolubleWC wanted_transformed_incl_derivs
then return [] -- See Note [Quantification with errors]
- -- NB: must include derived errors
- else do { gbl_tvs <- tcGetGlobalTyVars
- ; let quant_cand = approximateWC wanted_transformed
+ -- NB: must include derived errors in this test,
+ -- hence "incl_derivs"
+
+ else do { let quant_cand = approximateWC wanted_transformed
meta_tvs = filter isMetaTyVar (varSetElems (tyVarsOfCts quant_cand))
- ; ((flats, _insols), _extra_binds) <- runTcS $
+ ; gbl_tvs <- tcGetGlobalTyVars
+ ; null_ev_binds_var <- newTcEvBinds
+ -- Miminise quant_cand. We are not interested in any evidence
+ -- produced, because we are going to simplify wanted_transformed
+ -- again later. All we want here is the predicates over which to
+ -- quantify.
+ --
+ -- If any meta-tyvar unifications take place (unlikely), we'll
+ -- pick that up later.
+
+ ; (flats, _insols) <- runTcSWithEvBinds null_ev_binds_var $
do { mapM_ (promoteAndDefaultTyVar untch gbl_tvs) meta_tvs
-- See Note [Promote _and_ default when inferring]
; _implics <- solveInteract quant_cand
; getInertUnsolved }
- ; return (map ctPred $ filter isWantedCt (bagToList flats)) }
- -- NB: Dimitrios is slightly worried that we will get
- -- family equalities (F Int ~ alpha) in the quantification
- -- candidates, as we have performed no further unflattening
- -- at this point. Nothing bad, but inferred contexts might
- -- look complicated.
+
+ ; flats' <- zonkFlats null_ev_binds_var untch $
+ filterBag isWantedCt flats
+ -- The quant_cand were already fully zonked, so this zonkFlats
+ -- really only unflattens the flattening that solveInteract
+ -- may have done (Trac #8889).
+ -- E.g. quant_cand = F a, where F :: * -> Constraint
+ -- We'll flatten to (alpha, F a ~ alpha)
+ -- fail to make any further progress and must unflatten again
+
+ ; return (map ctPred $ bagToList flats') }
-- NB: quant_pred_candidates is already the fixpoint of any
-- unifications that may have happened
@@ -326,6 +365,7 @@ simplifyInfer _top_lvl apply_mr name_taus wanteds
{ -- Step 7) Emit an implication
let minimal_flat_preds = mkMinimalBySCs bound
+ -- See Note [Minimize by Superclasses]
skol_info = InferSkol [ (name, mkSigmaTy [] minimal_flat_preds ty)
| (name, ty) <- name_taus ]
-- Don't add the quantified variables here, because
@@ -481,11 +521,11 @@ This only half-works, but then let-generalisation only half-works.
* *
***********************************************************************************
-See note [Simplifying RULE consraints] in TcRule
+See note [Simplifying RULE constraints] in TcRule
-Note [RULE quanfification over equalities]
+Note [RULE quantification over equalities]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Decideing which equalities to quantify over is tricky:
+Deciding which equalities to quantify over is tricky:
* We do not want to quantify over insoluble equalities (Int ~ Bool)
(a) because we prefer to report a LHS type error
(b) because if such things end up in 'givens' we get a bogus
@@ -803,39 +843,6 @@ Consider floated_eqs (all wanted or derived):
simpl_loop. So we iterate if there any of these
\begin{code}
-floatEqualities :: [TcTyVar] -> Bool -> WantedConstraints
- -> TcS (Cts, WantedConstraints)
--- Post: The returned floated constraints (Cts) are only Wanted or Derived
--- and come from the input wanted ev vars or deriveds
--- Also performs some unifications, adding to monadically-carried ty_binds
--- These will be used when processing floated_eqs later
-floatEqualities skols no_given_eqs wanteds@(WC { wc_flat = flats })
- | not no_given_eqs -- There are some given equalities, so don't float
- = return (emptyBag, wanteds) -- Note [Float Equalities out of Implications]
- | otherwise
- = do { let (float_eqs, remaining_flats) = partitionBag is_floatable flats
- ; untch <- TcS.getUntouchables
- ; mapM_ (promoteTyVar untch) (varSetElems (tyVarsOfCts float_eqs))
- -- See Note [Promoting unification variables]
- ; ty_binds <- getTcSTyBindsMap
- ; traceTcS "floatEqualities" (vcat [ text "Flats =" <+> ppr flats
- , text "Floated eqs =" <+> ppr float_eqs
- , text "Ty binds =" <+> ppr ty_binds])
- ; return (float_eqs, wanteds { wc_flat = remaining_flats }) }
- where
- -- See Note [Float equalities from under a skolem binding]
- skol_set = fixVarSet mk_next (mkVarSet skols)
- mk_next tvs = foldrBag grow_one tvs flats
- grow_one (CFunEqCan { cc_tyargs = xis, cc_rhs = rhs }) tvs
- | intersectsVarSet tvs (tyVarsOfTypes xis)
- = tvs `unionVarSet` tyVarsOfType rhs
- grow_one _ tvs = tvs
-
- is_floatable :: Ct -> Bool
- is_floatable ct = isEqPred pred && skol_set `disjointVarSet` tyVarsOfType pred
- where
- pred = ctPred ct
-
promoteTyVar :: Untouchables -> TcTyVar -> TcS ()
-- When we float a constraint out of an implication we must restore
-- invariant (MetaTvInv) in Note [Untouchable type variables] in TcType
@@ -996,6 +1003,80 @@ should! If we don't solve the constraint, we'll stupidly quantify over
(b:*) instead of (a:OpenKind), which can lead to disaster; see Trac #7332.
Trac #7641 is a simpler example.
+Note [Promoting unification variables]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When we float an equality out of an implication we must "promote" free
+unification variables of the equality, in order to maintain Invariant
+(MetaTvInv) from Note [Untouchable type variables] in TcType. for the
+leftover implication.
+
+This is absolutely necessary. Consider the following example. We start
+with two implications and a class with a functional dependency.
+
+ class C x y | x -> y
+ instance C [a] [a]
+
+ (I1) [untch=beta]forall b. 0 => F Int ~ [beta]
+ (I2) [untch=beta]forall c. 0 => F Int ~ [[alpha]] /\ C beta [c]
+
+We float (F Int ~ [beta]) out of I1, and we float (F Int ~ [[alpha]]) out of I2.
+They may react to yield that (beta := [alpha]) which can then be pushed inwards
+the leftover of I2 to get (C [alpha] [a]) which, using the FunDep, will mean that
+(alpha := a). In the end we will have the skolem 'b' escaping in the untouchable
+beta! Concrete example is in indexed_types/should_fail/ExtraTcsUntch.hs:
+
+ class C x y | x -> y where
+ op :: x -> y -> ()
+
+ instance C [a] [a]
+
+ type family F a :: *
+
+ h :: F Int -> ()
+ h = undefined
+
+ data TEx where
+ TEx :: a -> TEx
+
+
+ f (x::beta) =
+ let g1 :: forall b. b -> ()
+ g1 _ = h [x]
+ g2 z = case z of TEx y -> (h [[undefined]], op x [y])
+ in (g1 '3', g2 undefined)
+
+
+
+Note [Solving Family Equations]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+After we are done with simplification we may be left with constraints of the form:
+ [Wanted] F xis ~ beta
+If 'beta' is a touchable unification variable not already bound in the TyBinds
+then we'd like to create a binding for it, effectively "defaulting" it to be 'F xis'.
+
+When is it ok to do so?
+ 1) 'beta' must not already be defaulted to something. Example:
+
+ [Wanted] F Int ~ beta <~ Will default [beta := F Int]
+ [Wanted] F Char ~ beta <~ Already defaulted, can't default again. We
+ have to report this as unsolved.
+
+ 2) However, we must still do an occurs check when defaulting (F xis ~ beta), to
+ set [beta := F xis] only if beta is not among the free variables of xis.
+
+ 3) Notice that 'beta' can't be bound in ty binds already because we rewrite RHS
+ of type family equations. See Inert Set invariants in TcInteract.
+
+This solving is now happening during zonking, see Note [Unflattening while zonking]
+in TcMType.
+
+
+*********************************************************************************
+* *
+* Floating equalities *
+* *
+*********************************************************************************
+
Note [Float Equalities out of Implications]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For ordinary pattern matches (including existentials) we float
@@ -1041,8 +1122,59 @@ Consequence: classes with functional dependencies don't matter (since there is
no evidence for a fundep equality), but equality superclasses do matter (since
they carry evidence).
+\begin{code}
+floatEqualities :: [TcTyVar] -> Bool -> WantedConstraints
+ -> TcS (Cts, WantedConstraints)
+-- Main idea: see Note [Float Equalities out of Implications]
+--
+-- Post: The returned floated constraints (Cts) are only Wanted or Derived
+-- and come from the input wanted ev vars or deriveds
+-- Also performs some unifications (via promoteTyVar), adding to
+-- monadically-carried ty_binds. These will be used when processing
+-- floated_eqs later
+--
+-- Subtleties: Note [Float equalities from under a skolem binding]
+-- Note [Skolem escape]
+floatEqualities skols no_given_eqs wanteds@(WC { wc_flat = flats })
+ | not no_given_eqs -- There are some given equalities, so don't float
+ = return (emptyBag, wanteds) -- Note [Float Equalities out of Implications]
+ | otherwise
+ = do { let (float_eqs, remaining_flats) = partitionBag is_floatable flats
+ ; untch <- TcS.getUntouchables
+ ; mapM_ (promoteTyVar untch) (varSetElems (tyVarsOfCts float_eqs))
+ -- See Note [Promoting unification variables]
+ ; ty_binds <- getTcSTyBindsMap
+ ; traceTcS "floatEqualities" (vcat [ text "Skols =" <+> ppr skols
+ , text "Flats =" <+> ppr flats
+ , text "Skol set =" <+> ppr skol_set
+ , text "Floated eqs =" <+> ppr float_eqs
+ , text "Ty binds =" <+> ppr ty_binds])
+ ; return (float_eqs, wanteds { wc_flat = remaining_flats }) }
+ where
+ is_floatable :: Ct -> Bool
+ is_floatable ct
+ = case classifyPredType (ctPred ct) of
+ EqPred ty1 ty2 -> skol_set `disjointVarSet` tyVarsOfType ty1
+ && skol_set `disjointVarSet` tyVarsOfType ty2
+ _ -> False
+
+ skol_set = fixVarSet mk_next (mkVarSet skols)
+ mk_next tvs = foldr grow_one tvs flat_eqs
+ flat_eqs :: [(TcTyVarSet, TcTyVarSet)]
+ flat_eqs = [ (tyVarsOfType ty1, tyVarsOfType ty2)
+ | EqPred ty1 ty2 <- map (classifyPredType . ctPred) (bagToList flats)]
+ -- grow_one: one step of growing the skolem set from a flat equality
+ -- (lhs-fvs, rhs-fvs). Per Note [Float equalities from under a skolem
+ -- binding]: if the current set meets the LHS, add the RHS's free vars,
+ -- and vice versa.
+ grow_one (tvs1,tvs2) tvs
+ | intersectsVarSet tvs tvs1 = tvs `unionVarSet` tvs2
+ | intersectsVarSet tvs tvs2 = tvs `unionVarSet` tvs1
+ | otherwise = tvs
+\end{code}
+
Note [When does an implication have given equalities?]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ NB: This note is mainly referred to from TcSMonad
+ but it relates to floating equalities, so I've
+ left it here
+
Consider an implication
beta => alpha ~ Int
where beta is a unification variable that has already been unified
@@ -1084,118 +1216,97 @@ An alternative we considered was to
equalities mentions any of the ic_givens of this implication.
This seems like the Right Thing, but it's more code, and more work
at runtime, so we are using the FlatSkolOrigin idea intead. It's less
-obvious that it works, but I htink it does, and it's simple and efficient.
-
+obvious that it works, but I think it does, and it's simple and efficient.
Note [Float equalities from under a skolem binding]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-You might worry about skolem escape with all this floating.
-For example, consider
- [2] forall a. (a ~ F beta[2] delta,
- Maybe beta[2] ~ gamma[1])
-
-The (Maybe beta ~ gamma) doesn't mention 'a', so we float it, and
-solve with gamma := beta. But what if later delta:=Int, and
- F b Int = b.
-Then we'd get a ~ beta[2], and solve to get beta:=a, and now the
-skolem has escaped!
-
-But it's ok: when we float (Maybe beta[2] ~ gamma[1]), we promote beta[2]
-to beta[1], and that means the (a ~ beta[1]) will be stuck, as it should be.
-
-Previously we tried to "grow" the skol_set with the constraints, to get
-all the tyvars that could *conceivably* unify with the skolems, but that
-was far too conservative (Trac #7804). Example: this should be fine:
- f :: (forall a. a -> Proxy x -> Proxy (F x)) -> Int
- f = error "Urk" :: (forall a. a -> Proxy x -> Proxy (F x)) -> Int
-
-BUT (sigh) we have to be careful. Here are some edge cases:
+Which of the flat equalities can we float out? Obviously, only
+ones that don't mention the skolem-bound variables. But that is
+over-eager. Consider
+ [2] forall a. F a beta[1] ~ gamma[2], G beta[1] gamma[2] ~ Int
+The second constraint doesn't mention 'a'. But if we float it
+we'll promote gamma to gamma'[1]. Now suppose that we learn that
+beta := Bool, and F a Bool = a, and G Bool _ = Int. Then we'll
+be left with the constraint
+ [2] forall a. a ~ gamma'[1]
+which is insoluble because gamma became untouchable.
+
+Solution: only promote a constraint if its free variables cannot
+possibly be connected with the skolems. Procedurally, start with
+the skolems and "grow" that set as follows:
+ * For each flat equality F ts ~ s, or tv ~ s,
+ if the current set intersects with the LHS of the equality,
+ add the free vars of the RHS, and vice versa
+That gives us a grown skolem set. Now float an equality if its free
+vars don't intersect the grown skolem set.
+
+This seems very ad hoc (sigh). But here are some tricky edge cases:
a) [2]forall a. (F a delta[1] ~ beta[2], delta[1] ~ Maybe beta[2])
-b) [2]forall a. (F b ty ~ beta[2], G beta[2] ~ gamma[2])
+b1) [2]forall a. (F a ty ~ beta[2], G beta[2] ~ gamma[2])
+b2) [2]forall a. (a ~ beta[2], G beta[2] ~ gamma[2])
c) [2]forall a. (F a ty ~ beta[2], delta[1] ~ Maybe beta[2])
+d) [2]forall a. (gamma[1] ~ Tree beta[2], F ty ~ beta[2])
In (a) we *must* float out the second equality,
else we can't solve at all (Trac #7804).
-In (b) we *must not* float out the second equality.
- It will ultimately be solved (by flattening) in situ, but if we
- float it we'll promote beta,gamma, and render the first equality insoluble.
+In (b1, b2) we *must not* float out the second equality.
+ It will ultimately be solved (by flattening) in situ, but if we float
+ it we'll promote beta,gamma, and render the first equality insoluble.
+
+ Trac #9316 was an example of (b2). You may wonder why (a ~ beta[2]) isn't
+ solved; in #9316 it wasn't solved because (a:*) and (beta:kappa[1]), so the
+ equality was kind-mismatched, and hence was a CIrredEvCan. There was
+ another equality alongside, (kappa[1] ~ *). We must first float *that*
+ one out and *then* we can solve (a ~ beta).
In (c) it would be OK to float the second equality but better not to.
If we flatten we see (delta[1] ~ Maybe (F a ty)), which is a
- skolem-escape problem. If we float the secodn equality we'll
+ skolem-escape problem. If we float the second equality we'll
end up with (F a ty ~ beta'[1]), which is a less explicable error.
-Hence we start with the skolems, grow them by the CFunEqCans, and
-float ones that don't mention the grown variables. Seems very ad hoc.
-
-Note [Promoting unification variables]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-When we float an equality out of an implication we must "promote" free
-unification variables of the equality, in order to maintain Invariant
-(MetaTvInv) from Note [Untouchable type variables] in TcType. for the
-leftover implication.
-
-This is absolutely necessary. Consider the following example. We start
-with two implications and a class with a functional dependency.
-
- class C x y | x -> y
- instance C [a] [a]
-
- (I1) [untch=beta]forall b. 0 => F Int ~ [beta]
- (I2) [untch=beta]forall c. 0 => F Int ~ [[alpha]] /\ C beta [c]
-
-We float (F Int ~ [beta]) out of I1, and we float (F Int ~ [[alpha]]) out of I2.
-They may react to yield that (beta := [alpha]) which can then be pushed inwards
-the leftover of I2 to get (C [alpha] [a]) which, using the FunDep, will mean that
-(alpha := a). In the end we will have the skolem 'b' escaping in the untouchable
-beta! Concrete example is in indexed_types/should_fail/ExtraTcsUntch.hs:
-
- class C x y | x -> y where
- op :: x -> y -> ()
-
- instance C [a] [a]
+In (d) we must float the first equality, so that we can unify gamma.
+ But that promotes beta, so we must float the second equality too,
+ Trac #7196 exhibits this case
- type family F a :: *
+Some notes
- h :: F Int -> ()
- h = undefined
+* When "growing", do not simply take the free vars of the predicate!
+ Example [2]forall a. (a:* ~ beta[2]:kappa[1]), (kappa[1] ~ *)
+ We must float the second, and we must not float the first.
+ But the first actually looks like ((~) kappa a beta), so if we just
+ look at its free variables we'll see {a,kappa,beta}, and that might
+ make us think kappa should be in the grown skol set.
- data TEx where
- TEx :: a -> TEx
+ (In any case, the kind argument for a kind-mis-matched equality like
+ this one doesn't really make sense anyway.)
+ That's why we use classifyPredType when growing.
- f (x::beta) =
- let g1 :: forall b. b -> ()
- g1 _ = h [x]
- g2 z = case z of TEx y -> (h [[undefined]], op x [y])
- in (g1 '3', g2 undefined)
-
-
-
-Note [Solving Family Equations]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-After we are done with simplification we may be left with constraints of the form:
- [Wanted] F xis ~ beta
-If 'beta' is a touchable unification variable not already bound in the TyBinds
-then we'd like to create a binding for it, effectively "defaulting" it to be 'F xis'.
-
-When is it ok to do so?
- 1) 'beta' must not already be defaulted to something. Example:
+* Previously we tried to "grow" the skol_set with *all* the
+ constraints (not just equalities), to get all the tyvars that could
+ *conceivably* unify with the skolems, but that was far too
+ conservative (Trac #7804). Example: this should be fine:
+ f :: (forall a. a -> Proxy x -> Proxy (F x)) -> Int
+ f = error "Urk" :: (forall a. a -> Proxy x -> Proxy (F x)) -> Int
- [Wanted] F Int ~ beta <~ Will default [beta := F Int]
- [Wanted] F Char ~ beta <~ Already defaulted, can't default again. We
- have to report this as unsolved.
- 2) However, we must still do an occurs check when defaulting (F xis ~ beta), to
- set [beta := F xis] only if beta is not among the free variables of xis.
+Note [Skolem escape]
+~~~~~~~~~~~~~~~~~~~~
+You might worry about skolem escape with all this floating.
+For example, consider
+ [2] forall a. (a ~ F beta[2] delta,
+ Maybe beta[2] ~ gamma[1])
- 3) Notice that 'beta' can't be bound in ty binds already because we rewrite RHS
- of type family equations. See Inert Set invariants in TcInteract.
+The (Maybe beta ~ gamma) doesn't mention 'a', so we float it, and
+solve with gamma := beta. But what if later delta:=Int, and
+ F b Int = b.
+Then we'd get a ~ beta[2], and solve to get beta:=a, and now the
+skolem has escaped!
-This solving is now happening during zonking, see Note [Unflattening while zonking]
-in TcMType.
+But it's ok: when we float (Maybe beta[2] ~ gamma[1]), we promote beta[2]
+to beta[1], and that means the (a ~ beta[1]) will be stuck, as it should be.
*********************************************************************************
@@ -1237,16 +1348,22 @@ findDefaultableGroups
-> Cts -- Unsolved (wanted or derived)
-> [[(Ct,Class,TcTyVar)]]
findDefaultableGroups (default_tys, (ovl_strings, extended_defaults)) wanteds
- | null default_tys = []
- | otherwise = filter is_defaultable_group (equivClasses cmp_tv unaries)
+ | null default_tys = []
+ | otherwise = defaultable_groups
where
+ defaultable_groups = filter is_defaultable_group groups
+ groups = equivClasses cmp_tv unaries
unaries :: [(Ct, Class, TcTyVar)] -- (C tv) constraints
non_unaries :: [Ct] -- and *other* constraints
(unaries, non_unaries) = partitionWith find_unary (bagToList wanteds)
-- Finds unary type-class constraints
+ -- But take account of polykinded classes like Typeable,
+ -- which may look like (Typeable * (a:*)) (Trac #8931)
find_unary cc
- | Just (cls,[ty]) <- getClassPredTys_maybe (ctPred cc)
+ | Just (cls,tys) <- getClassPredTys_maybe (ctPred cc)
+ , Just (kinds, ty) <- snocView tys
+ , all isKind kinds
, Just tv <- tcGetTyVar_maybe ty
, isMetaTyVar tv -- We might have runtime-skolems in GHCi, and
-- we definitely don't want to try to assign to those!
diff --git a/compiler/typecheck/TcSplice.lhs b/compiler/typecheck/TcSplice.lhs
index b7e26997c6d3..bb6af8cb95c2 100644
--- a/compiler/typecheck/TcSplice.lhs
+++ b/compiler/typecheck/TcSplice.lhs
@@ -7,8 +7,9 @@ TcSplice: Template Haskell splices
\begin{code}
-{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE CPP, FlexibleInstances, MagicHash, ScopedTypeVariables #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
+
module TcSplice(
-- These functions are defined in stage1 and stage2
-- The raise civilised errors in stage1
@@ -70,6 +71,7 @@ import Class
import Inst
import TyCon
import CoAxiom
+import PatSyn ( patSynName )
import ConLike
import DataCon
import TcEvidence( TcEvBinds(..) )
@@ -342,7 +344,7 @@ tcTypedBracket brack@(TExpBr expr) res_ty
-- Throw away the typechecked expression but return its type.
-- We'll typecheck it again when we splice it in somewhere
; (_tc_expr, expr_ty) <- setStage (Brack cur_stage (TcPending ps_ref lie_var)) $
- tcInferRhoNC expr
+ tcInferRhoNC expr
-- NC for no context; tcBracket does that
; meta_ty <- tcTExpTy expr_ty
@@ -844,6 +846,12 @@ like that. Here's how it's processed:
(qReport True s) by using addErr to add an error message to the bag of errors.
The 'fail' in TcM raises an IOEnvFailure exception
+ * 'qReport' forces the message to ensure any exception hidden in an unevaluated
+ thunk doesn't get into the bag of errors. Otherwise the following splice
+ will trigger panic (Trac #8987):
+ $(fail undefined)
+ See also Note [Concealed TH exceptions]
+
* So, when running a splice, we catch all exceptions; then for
- an IOEnvFailure exception, we assume the error is already
in the error-bag (above)
@@ -874,8 +882,10 @@ instance TH.Quasi (IOEnv (Env TcGblEnv TcLclEnv)) where
; let i = getKey u
; return (TH.mkNameU s i) }
- qReport True msg = addErr (text msg)
- qReport False msg = addWarn (text msg)
+ -- 'msg' is forced to ensure exceptions don't escape,
+ -- see Note [Exceptions in TH]
+ qReport True msg = seqList msg $ addErr (text msg)
+ qReport False msg = seqList msg $ addWarn (text msg)
qLocation = do { m <- getModule
; l <- getSrcSpanM
@@ -885,7 +895,7 @@ instance TH.Quasi (IOEnv (Env TcGblEnv TcLclEnv)) where
RealSrcSpan s -> return s
; return (TH.Loc { TH.loc_filename = unpackFS (srcSpanFile r)
, TH.loc_module = moduleNameString (moduleName m)
- , TH.loc_package = packageIdString (modulePackageId m)
+ , TH.loc_package = packageKeyString (modulePackageKey m)
, TH.loc_start = (srcSpanStartLine r, srcSpanStartCol r)
, TH.loc_end = (srcSpanEndLine r, srcSpanEndCol r) }) }
@@ -1015,7 +1025,7 @@ reifyInstances th_nm th_tys
; let matches = lookupFamInstEnv inst_envs tc tys
; traceTc "reifyInstances2" (ppr matches)
; mapM (reifyFamilyInstance . fim_instance) matches }
- _ -> bale_out (hang (ptext (sLit "reifyInstances:") <+> quotes (ppr ty))
+ _ -> bale_out (hang (ptext (sLit "reifyInstances:") <+> quotes (ppr ty))
2 (ptext (sLit "is not a class constraint or type family application"))) }
where
doc = ClassInstanceCtx
@@ -1173,6 +1183,8 @@ reifyThing (AGlobal (AConLike (RealDataCon dc)))
; return (TH.DataConI (reifyName name) ty
(reifyName (dataConOrigTyCon dc)) fix)
}
+reifyThing (AGlobal (AConLike (PatSynCon ps)))
+ = noTH (sLit "pattern synonyms") (ppr $ patSynName ps)
reifyThing (ATcId {tct_id = id})
= do { ty1 <- zonkTcType (idType id) -- Make use of all the info we have, even
@@ -1191,7 +1203,8 @@ reifyThing thing = pprPanic "reifyThing" (pprTcTyThingCategory thing)
-------------------------------------------
reifyAxBranch :: CoAxBranch -> TcM TH.TySynEqn
reifyAxBranch (CoAxBranch { cab_lhs = args, cab_rhs = rhs })
- = do { args' <- mapM reifyType args
+ -- remove kind patterns (#8884)
+ = do { args' <- mapM reifyType (filter (not . isKind) args)
; rhs' <- reifyType rhs
; return (TH.TySynEqn args' rhs') }
@@ -1207,10 +1220,15 @@ reifyTyCon tc
= return (TH.PrimTyConI (reifyName tc) (tyConArity tc) (isUnLiftedTyCon tc))
| isFamilyTyCon tc
- = do { let tvs = tyConTyVars tc
- kind = tyConKind tc
- ; kind' <- if isLiftedTypeKind kind then return Nothing
- else fmap Just (reifyKind kind)
+ = do { let tvs = tyConTyVars tc
+ kind = tyConKind tc
+
+ -- we need the *result kind* (see #8884)
+ (kvs, mono_kind) = splitForAllTys kind
+ -- tyConArity includes *kind* params
+ (_, res_kind) = splitKindFunTysN (tyConArity tc - length kvs)
+ mono_kind
+ ; kind' <- fmap Just (reifyKind res_kind)
; tvs' <- reifyTyVars tvs
; flav' <- reifyFamFlavour tc
@@ -1306,13 +1324,14 @@ reifyClassInstance i
------------------------------
reifyFamilyInstance :: FamInst -> TcM TH.Dec
-reifyFamilyInstance (FamInst { fi_flavor = flavor
+reifyFamilyInstance (FamInst { fi_flavor = flavor
, fi_fam = fam
, fi_tys = lhs
, fi_rhs = rhs })
= case flavor of
SynFamilyInst ->
- do { th_lhs <- reifyTypes lhs
+ -- remove kind patterns (#8884)
+ do { th_lhs <- reifyTypes (filter (not . isKind) lhs)
; th_rhs <- reifyType rhs
; return (TH.TySynInstD (reifyName fam) (TH.TySynEqn th_lhs th_rhs)) }
@@ -1396,7 +1415,7 @@ reifyFamFlavour tc
| Just ax <- isClosedSynFamilyTyCon_maybe tc
= do { eqns <- brListMapM reifyAxBranch $ coAxiomBranches ax
; return $ Right eqns }
-
+
| otherwise
= panic "TcSplice.reifyFamFlavour: not a type family"
@@ -1423,6 +1442,7 @@ reify_tc_app tc tys
| tc `hasKey` listTyConKey = TH.ListT
| tc `hasKey` nilDataConKey = TH.PromotedNilT
| tc `hasKey` consDataConKey = TH.PromotedConsT
+ | tc `hasKey` eqTyConKey = TH.EqualityT
| otherwise = TH.ConT (reifyName tc)
removeKinds :: Kind -> [TypeRep.Type] -> [TypeRep.Type]
removeKinds (FunTy k1 k2) (h:t)
@@ -1438,17 +1458,7 @@ reifyPred ty
-- We could reify the implicit paramter as a class but it seems
-- nicer to support them properly...
| isIPPred ty = noTH (sLit "implicit parameters") (ppr ty)
- | otherwise
- = case classifyPredType ty of
- ClassPred cls tys -> do { tys' <- reifyTypes tys
- ; return $ TH.ClassP (reifyName cls) tys' }
- EqPred ty1 ty2 -> do { ty1' <- reifyType ty1
- ; ty2' <- reifyType ty2
- ; return $ TH.EqualP ty1' ty2'
- }
- TuplePred _ -> noTH (sLit "tuple predicates") (ppr ty)
- IrredPred _ -> noTH (sLit "irreducible predicates") (ppr ty)
-
+ | otherwise = reifyType ty
------------------------------
reifyName :: NamedThing n => n -> TH.Name
@@ -1462,7 +1472,7 @@ reifyName thing
where
name = getName thing
mod = ASSERT( isExternalName name ) nameModule name
- pkg_str = packageIdString (modulePackageId mod)
+ pkg_str = packageKeyString (modulePackageKey mod)
mod_str = moduleNameString (moduleName mod)
occ_str = occNameString occ
occ = nameOccName name
@@ -1495,26 +1505,27 @@ lookupThAnnLookup :: TH.AnnLookup -> TcM CoreAnnTarget
lookupThAnnLookup (TH.AnnLookupName th_nm) = fmap NamedTarget (lookupThName th_nm)
lookupThAnnLookup (TH.AnnLookupModule (TH.Module pn mn))
= return $ ModuleTarget $
- mkModule (stringToPackageId $ TH.pkgString pn) (mkModuleName $ TH.modString mn)
+ mkModule (stringToPackageKey $ TH.pkgString pn) (mkModuleName $ TH.modString mn)
reifyAnnotations :: Data a => TH.AnnLookup -> TcM [a]
-reifyAnnotations th_nm
- = do { name <- lookupThAnnLookup th_nm
- ; eps <- getEps
+reifyAnnotations th_name
+ = do { name <- lookupThAnnLookup th_name
+ ; topEnv <- getTopEnv
+ ; epsHptAnns <- liftIO $ prepareAnnotations topEnv Nothing
; tcg <- getGblEnv
- ; let epsAnns = findAnns deserializeWithData (eps_ann_env eps) name
- ; let envAnns = findAnns deserializeWithData (tcg_ann_env tcg) name
- ; return (envAnns ++ epsAnns) }
+ ; let selectedEpsHptAnns = findAnns deserializeWithData epsHptAnns name
+ ; let selectedTcgAnns = findAnns deserializeWithData (tcg_ann_env tcg) name
+ ; return (selectedEpsHptAnns ++ selectedTcgAnns) }
------------------------------
modToTHMod :: Module -> TH.Module
-modToTHMod m = TH.Module (TH.PkgName $ packageIdString $ modulePackageId m)
+modToTHMod m = TH.Module (TH.PkgName $ packageKeyString $ modulePackageKey m)
(TH.ModName $ moduleNameString $ moduleName m)
reifyModule :: TH.Module -> TcM TH.ModuleInfo
reifyModule (TH.Module (TH.PkgName pkgString) (TH.ModName mString)) = do
this_mod <- getModule
- let reifMod = mkModule (stringToPackageId pkgString) (mkModuleName mString)
+ let reifMod = mkModule (stringToPackageKey pkgString) (mkModuleName mString)
if (reifMod == this_mod) then reifyThisModule else reifyFromIface reifMod
where
reifyThisModule = do
@@ -1524,10 +1535,10 @@ reifyModule (TH.Module (TH.PkgName pkgString) (TH.ModName mString)) = do
reifyFromIface reifMod = do
iface <- loadInterfaceForModule (ptext (sLit "reifying module from TH for") <+> ppr reifMod) reifMod
let usages = [modToTHMod m | usage <- mi_usages iface,
- Just m <- [usageToModule (modulePackageId reifMod) usage] ]
+ Just m <- [usageToModule (modulePackageKey reifMod) usage] ]
return $ TH.ModuleInfo usages
- usageToModule :: PackageId -> Usage -> Maybe Module
+ usageToModule :: PackageKey -> Usage -> Maybe Module
usageToModule _ (UsageFile {}) = Nothing
usageToModule this_pkg (UsageHomeModule { usg_mod_name = mn }) = Just $ mkModule this_pkg mn
usageToModule _ (UsagePackageModule { usg_mod = m }) = Just m
@@ -1562,4 +1573,4 @@ will appear in TH syntax like this
\begin{code}
#endif /* GHCI */
-\end{code}
\ No newline at end of file
+\end{code}
diff --git a/compiler/typecheck/TcSplice.lhs-boot b/compiler/typecheck/TcSplice.lhs-boot
index c496aed798da..fd19dee7dacf 100644
--- a/compiler/typecheck/TcSplice.lhs-boot
+++ b/compiler/typecheck/TcSplice.lhs-boot
@@ -1,9 +1,10 @@
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcSplice where
import HsSyn ( HsSplice, HsBracket, HsQuasiQuote,
HsExpr, LHsType, LHsExpr, LPat, LHsDecl )
import HsExpr ( PendingRnSplice )
-import Id ( Id )
import Name ( Name )
import RdrName ( RdrName )
import TcRnTypes( TcM, TcId )
@@ -11,6 +12,7 @@ import TcType ( TcRhoType )
import Annotations ( Annotation, CoreAnnTarget )
#ifdef GHCI
+import Id ( Id )
import qualified Language.Haskell.TH as TH
#endif
@@ -26,20 +28,20 @@ tcTypedBracket :: HsBracket Name
-> TcRhoType
-> TcM (HsExpr TcId)
-tcTopSpliceExpr :: Bool -> TcM (LHsExpr Id) -> TcM (LHsExpr Id)
-
runQuasiQuoteDecl :: HsQuasiQuote RdrName -> TcM [LHsDecl RdrName]
runQuasiQuoteExpr :: HsQuasiQuote RdrName -> TcM (LHsExpr RdrName)
runQuasiQuoteType :: HsQuasiQuote RdrName -> TcM (LHsType RdrName)
runQuasiQuotePat :: HsQuasiQuote RdrName -> TcM (LPat RdrName)
runAnnotation :: CoreAnnTarget -> LHsExpr Name -> TcM Annotation
+#ifdef GHCI
+tcTopSpliceExpr :: Bool -> TcM (LHsExpr Id) -> TcM (LHsExpr Id)
+
runMetaE :: LHsExpr Id -> TcM (LHsExpr RdrName)
runMetaP :: LHsExpr Id -> TcM (LPat RdrName)
runMetaT :: LHsExpr Id -> TcM (LHsType RdrName)
runMetaD :: LHsExpr Id -> TcM [LHsDecl RdrName]
-#ifdef GHCI
lookupThName_maybe :: TH.Name -> TcM (Maybe Name)
runQuasi :: TH.Q a -> TcM a
#endif
diff --git a/compiler/typecheck/TcTyClsDecls.lhs b/compiler/typecheck/TcTyClsDecls.lhs
index 1fbdbb22bebd..6dcbaffef8b4 100644
--- a/compiler/typecheck/TcTyClsDecls.lhs
+++ b/compiler/typecheck/TcTyClsDecls.lhs
@@ -6,7 +6,7 @@
TcTyClsDecls: Typecheck type and class declarations
\begin{code}
-{-# LANGUAGE TupleSections #-}
+{-# LANGUAGE CPP, TupleSections #-}
module TcTyClsDecls (
tcTyAndClassDecls, tcAddImplicits,
@@ -14,7 +14,7 @@ module TcTyClsDecls (
-- Functions used by TcInstDcls to check
-- data/type family instance declarations
kcDataDefn, tcConDecls, dataDeclChecks, checkValidTyCon,
- tcSynFamInstDecl, tcFamTyPats,
+ tcFamTyPats, tcTyFamInstEqn, famTyConShape,
tcAddTyFamInstCtxt, tcAddDataFamInstCtxt,
wrongKindOfFamily, dataConCtxt, badDataConTyCon
) where
@@ -354,7 +354,6 @@ getInitialKinds decls
do { pairss <- mapM (addLocM getInitialKind) decls
; return (concat pairss) }
--- See Note [Kind-checking strategies] in TcHsType
getInitialKind :: TyClDecl Name -> TcM [(Name, TcTyThing)]
-- Allocate a fresh kind variable for each TyCon and Class
-- For each tycon, return (tc, AThing k)
@@ -375,7 +374,7 @@ getInitialKind :: TyClDecl Name -> TcM [(Name, TcTyThing)]
getInitialKind decl@(ClassDecl { tcdLName = L _ name, tcdTyVars = ktvs, tcdATs = ats })
= do { (cl_kind, inner_prs) <-
- kcHsTyVarBndrs (kcStrategy decl) ktvs $
+ kcHsTyVarBndrs (hsDeclHasCusk decl) ktvs $
do { inner_prs <- getFamDeclInitialKinds ats
; return (constraintKind, inner_prs) }
; let main_pr = (name, AThing cl_kind)
@@ -386,7 +385,7 @@ getInitialKind decl@(DataDecl { tcdLName = L _ name
, tcdDataDefn = HsDataDefn { dd_kindSig = m_sig
, dd_cons = cons } })
= do { (decl_kind, _) <-
- kcHsTyVarBndrs (kcStrategy decl) ktvs $
+ kcHsTyVarBndrs (hsDeclHasCusk decl) ktvs $
do { res_k <- case m_sig of
Just ksig -> tcLHsKind ksig
Nothing -> return liftedTypeKind
@@ -418,16 +417,14 @@ getFamDeclInitialKind decl@(FamilyDecl { fdLName = L _ name
, fdTyVars = ktvs
, fdKindSig = ksig })
= do { (fam_kind, _) <-
- kcHsTyVarBndrs (kcStrategyFamDecl decl) ktvs $
+ kcHsTyVarBndrs (famDeclHasCusk decl) ktvs $
do { res_k <- case ksig of
Just k -> tcLHsKind k
Nothing
- | defaultResToStar -> return liftedTypeKind
- | otherwise -> newMetaKindVar
+ | famDeclHasCusk decl -> return liftedTypeKind
+ | otherwise -> newMetaKindVar
; return (res_k, ()) }
; return [ (name, AThing fam_kind) ] }
- where
- defaultResToStar = (kcStrategyFamDecl decl == FullKindSignature)
----------------
kcSynDecls :: [SCC (LTyClDecl Name)]
@@ -451,7 +448,7 @@ kcSynDecl decl@(SynDecl { tcdTyVars = hs_tvs, tcdLName = L _ name
-- Returns a possibly-unzonked kind
= tcAddDeclCtxt decl $
do { (syn_kind, _) <-
- kcHsTyVarBndrs (kcStrategy decl) hs_tvs $
+ kcHsTyVarBndrs (hsDeclHasCusk decl) hs_tvs $
do { traceTc "kcd1" (ppr name <+> brackets (ppr hs_tvs))
; (_, rhs_kind) <- tcLHsType rhs
; traceTc "kcd2" (ppr name)
@@ -502,10 +499,12 @@ kcTyClDecl (ForeignType {}) = return ()
-- closed type families look at their equations, but other families don't
-- do anything here
-kcTyClDecl (FamDecl (FamilyDecl { fdLName = L _ fam_tc_name
- , fdInfo = ClosedTypeFamily eqns }))
- = do { k <- kcLookupKind fam_tc_name
- ; mapM_ (kcTyFamInstEqn fam_tc_name k) eqns }
+kcTyClDecl (FamDecl (FamilyDecl { fdLName = L _ fam_tc_name
+ , fdTyVars = hs_tvs
+ , fdInfo = ClosedTypeFamily eqns }))
+ = do { tc_kind <- kcLookupKind fam_tc_name
+ ; let fam_tc_shape = ( fam_tc_name, length (hsQTvBndrs hs_tvs), tc_kind)
+ ; mapM_ (kcTyFamInstEqn fam_tc_shape) eqns }
kcTyClDecl (FamDecl {}) = return ()
-------------------
@@ -514,7 +513,10 @@ kcConDecl (ConDecl { con_name = name, con_qvars = ex_tvs
, con_cxt = ex_ctxt, con_details = details
, con_res = res })
= addErrCtxt (dataConCtxt name) $
- do { _ <- kcHsTyVarBndrs ParametricKinds ex_tvs $
+ -- the 'False' says that the existentials don't have a CUSK, as the
+ -- concept doesn't really apply here. We just need to bring the variables
+ -- into scope!
+ do { _ <- kcHsTyVarBndrs False ex_tvs $
do { _ <- tcHsContext ex_ctxt
; mapM_ (tcHsOpenType . getBangType) (hsConDeclArgTys details)
; _ <- tcConRes res
@@ -638,13 +640,13 @@ tcTyClDecl1 _parent rec_info
; (sig_stuff, gen_dm_env) <- tcClassSigs class_name sigs meths
; at_stuff <- tcClassATs class_name (AssocFamilyTyCon clas) ats at_defs
; mindef <- tcClassMinimalDef class_name sigs sig_stuff
- ; clas <- buildClass False {- Must include unfoldings for selectors -}
+ ; clas <- buildClass
class_name tvs' roles ctxt' fds' at_stuff
sig_stuff mindef tc_isrec
; traceTc "tcClassDecl" (ppr fundeps $$ ppr tvs' $$ ppr fds')
; return (clas, tvs', gen_dm_env) }
- ; let { gen_dm_ids = [ AnId (mkExportedLocalId gen_dm_name gen_dm_ty)
+ ; let { gen_dm_ids = [ AnId (mkExportedLocalId VanillaId gen_dm_name gen_dm_ty)
| (sel_id, GenDefMeth gen_dm_name) <- classOpItems clas
, let gen_dm_tau = expectJust "tcTyClDecl1" $
lookupNameEnv gen_dm_env (idName sel_id)
@@ -699,14 +701,11 @@ tcFamDecl1 parent
; checkFamFlag tc_name -- make sure we have -XTypeFamilies
- -- check to make sure all the names used in the equations are
- -- consistent
- ; let names = map (tfie_tycon . unLoc) eqns
- ; tcSynFamInstNames lname names
-
- -- process the equations, creating CoAxBranches
- ; tycon_kind <- kcLookupKind tc_name
- ; branches <- mapM (tcTyFamInstEqn tc_name tycon_kind) eqns
+ -- Process the equations, creating CoAxBranches
+ ; tc_kind <- kcLookupKind tc_name
+ ; let fam_tc_shape = (tc_name, length (hsQTvBndrs tvs), tc_kind)
+
+ ; branches <- mapM (tcTyFamInstEqn fam_tc_shape) eqns
-- we need the tycon that we will be creating, but it's in scope.
-- just look it up.
@@ -780,7 +779,8 @@ tcDataDefn rec_info tc_name tvs kind
= do { extra_tvs <- tcDataKindSig kind
; let final_tvs = tvs ++ extra_tvs
roles = rti_roles rec_info tc_name
- ; stupid_theta <- tcHsContext ctxt
+ ; stupid_tc_theta <- tcHsContext ctxt
+ ; stupid_theta <- zonkTcTypeToTypes emptyZonkEnv stupid_tc_theta
; kind_signatures <- xoptM Opt_KindSignatures
; is_boot <- tcIsHsBoot -- Are we compiling an hs-boot file?
@@ -792,7 +792,7 @@ tcDataDefn rec_info tc_name tvs kind
; checkKind kind tc_kind
; return () }
- ; h98_syntax <- dataDeclChecks tc_name new_or_data stupid_theta cons
+ ; gadt_syntax <- dataDeclChecks tc_name new_or_data stupid_theta cons
; tycon <- fixM $ \ tycon -> do
{ let res_ty = mkTyConApp tycon (mkTyVarTys final_tvs)
@@ -807,7 +807,7 @@ tcDataDefn rec_info tc_name tvs kind
; return (buildAlgTyCon tc_name final_tvs roles cType stupid_theta tc_rhs
(rti_is_rec rec_info tc_name)
(rti_promotable rec_info)
- (not h98_syntax) NoParentTyCon) }
+ gadt_syntax NoParentTyCon) }
; return [ATyCon tycon] }
\end{code}
@@ -835,76 +835,90 @@ Note that:
- We can get default definitions only for type families, not data families
\begin{code}
-tcClassATs :: Name -- The class name (not knot-tied)
- -> TyConParent -- The class parent of this associated type
- -> [LFamilyDecl Name] -- Associated types.
- -> [LTyFamInstDecl Name] -- Associated type defaults.
+tcClassATs :: Name -- The class name (not knot-tied)
+ -> TyConParent -- The class parent of this associated type
+ -> [LFamilyDecl Name] -- Associated types.
+ -> [LTyFamDefltEqn Name] -- Associated type defaults.
-> TcM [ClassATItem]
tcClassATs class_name parent ats at_defs
= do { -- Complain about associated type defaults for non associated-types
sequence_ [ failWithTc (badATErr class_name n)
- | n <- map (tyFamInstDeclName . unLoc) at_defs
+ | n <- map at_def_tycon at_defs
, not (n `elemNameSet` at_names) ]
; mapM tc_at ats }
where
- at_names = mkNameSet (map (unLoc . fdLName . unLoc) ats)
+ at_def_tycon :: LTyFamDefltEqn Name -> Name
+ at_def_tycon (L _ eqn) = unLoc (tfe_tycon eqn)
+
+ at_fam_name :: LFamilyDecl Name -> Name
+ at_fam_name (L _ decl) = unLoc (fdLName decl)
+
+ at_names = mkNameSet (map at_fam_name ats)
- at_defs_map :: NameEnv [LTyFamInstDecl Name]
+ at_defs_map :: NameEnv [LTyFamDefltEqn Name]
-- Maps an AT in 'ats' to a list of all its default defs in 'at_defs'
at_defs_map = foldr (\at_def nenv -> extendNameEnv_C (++) nenv
- (tyFamInstDeclName (unLoc at_def)) [at_def])
+ (at_def_tycon at_def) [at_def])
emptyNameEnv at_defs
tc_at at = do { [ATyCon fam_tc] <- addLocM (tcFamDecl1 parent) at
- ; let at_defs = lookupNameEnv at_defs_map (unLoc $ fdLName $ unLoc at)
- `orElse` []
- ; atd <- mapM (tcDefaultAssocDecl fam_tc) at_defs
- ; return (fam_tc, atd) }
+ ; let at_defs = lookupNameEnv at_defs_map (at_fam_name at)
+ `orElse` []
+ ; atd <- tcDefaultAssocDecl fam_tc at_defs
+ ; return (ATI fam_tc atd) }
-------------------------
-tcDefaultAssocDecl :: TyCon -- ^ Family TyCon
- -> LTyFamInstDecl Name -- ^ RHS
- -> TcM CoAxBranch -- ^ Type checked RHS and free TyVars
-tcDefaultAssocDecl fam_tc (L loc decl)
+tcDefaultAssocDecl :: TyCon -- ^ Family TyCon
+ -> [LTyFamDefltEqn Name] -- ^ Defaults
+ -> TcM (Maybe Type) -- ^ Type checked RHS
+tcDefaultAssocDecl _ []
+ = return Nothing -- No default declaration
+
+tcDefaultAssocDecl _ (d1:_:_)
+ = failWithTc (ptext (sLit "More than one default declaration for")
+ <+> ppr (tfe_tycon (unLoc d1)))
+
+tcDefaultAssocDecl fam_tc [L loc (TyFamEqn { tfe_tycon = L _ tc_name
+ , tfe_pats = hs_tvs
+ , tfe_rhs = rhs })]
= setSrcSpan loc $
- tcAddTyFamInstCtxt decl $
- do { traceTc "tcDefaultAssocDecl" (ppr decl)
- ; tcSynFamInstDecl fam_tc decl }
+ tcAddFamInstCtxt (ptext (sLit "default type instance")) tc_name $
+ tcTyClTyVars tc_name hs_tvs $ \ tvs rhs_kind ->
+ do { traceTc "tcDefaultAssocDecl" (ppr tc_name)
+ ; checkTc (isSynFamilyTyCon fam_tc) (wrongKindOfFamily fam_tc)
+ ; let (fam_name, fam_pat_arity, _) = famTyConShape fam_tc
+ ; ASSERT( fam_name == tc_name )
+ checkTc (length (hsQTvBndrs hs_tvs) == fam_pat_arity)
+ (wrongNumberOfParmsErr fam_pat_arity)
+ ; rhs_ty <- tcCheckLHsType rhs rhs_kind
+ ; rhs_ty <- zonkTcTypeToType emptyZonkEnv rhs_ty
+ ; let fam_tc_tvs = tyConTyVars fam_tc
+ subst = zipTopTvSubst tvs (mkTyVarTys fam_tc_tvs)
+ ; return ( ASSERT( equalLength fam_tc_tvs tvs )
+ Just (substTy subst rhs_ty) ) }
-- We check for well-formedness and validity later, in checkValidClass
-------------------------
-tcSynFamInstDecl :: TyCon -> TyFamInstDecl Name -> TcM CoAxBranch
--- Placed here because type family instances appear as
--- default decls in class declarations
-tcSynFamInstDecl fam_tc (TyFamInstDecl { tfid_eqn = eqn })
- = do { checkTc (isSynTyCon fam_tc) (wrongKindOfFamily fam_tc)
- ; tcTyFamInstEqn (tyConName fam_tc) (tyConKind fam_tc) eqn }
-
--- Checks to make sure that all the names in an instance group are the same
-tcSynFamInstNames :: Located Name -> [Located Name] -> TcM ()
-tcSynFamInstNames (L _ first) names
- = do { let badNames = filter ((/= first) . unLoc) names
- ; mapM_ (failLocated (wrongNamesInInstGroup first)) badNames }
- where
- failLocated :: (Name -> SDoc) -> Located Name -> TcM ()
- failLocated msg_fun (L loc name)
- = setSrcSpan loc $
- failWithTc (msg_fun name)
-
-kcTyFamInstEqn :: Name -> Kind -> LTyFamInstEqn Name -> TcM ()
-kcTyFamInstEqn fam_tc_name kind
- (L loc (TyFamInstEqn { tfie_pats = pats, tfie_rhs = hs_ty }))
+kcTyFamInstEqn :: FamTyConShape -> LTyFamInstEqn Name -> TcM ()
+kcTyFamInstEqn fam_tc_shape
+ (L loc (TyFamEqn { tfe_pats = pats, tfe_rhs = hs_ty }))
= setSrcSpan loc $
discardResult $
- tc_fam_ty_pats fam_tc_name kind pats (discardResult . (tcCheckLHsType hs_ty))
-
-tcTyFamInstEqn :: Name -> Kind -> LTyFamInstEqn Name -> TcM CoAxBranch
-tcTyFamInstEqn fam_tc_name kind
- (L loc (TyFamInstEqn { tfie_pats = pats, tfie_rhs = hs_ty }))
+ tc_fam_ty_pats fam_tc_shape pats (discardResult . (tcCheckLHsType hs_ty))
+
+tcTyFamInstEqn :: FamTyConShape -> LTyFamInstEqn Name -> TcM CoAxBranch
+-- Needs to be here, not in TcInstDcls, because closed families
+-- (typechecked here) have TyFamInstEqns
+tcTyFamInstEqn fam_tc_shape@(fam_tc_name,_,_)
+ (L loc (TyFamEqn { tfe_tycon = L _ eqn_tc_name
+ , tfe_pats = pats
+ , tfe_rhs = hs_ty }))
= setSrcSpan loc $
- tcFamTyPats fam_tc_name kind pats (discardResult . (tcCheckLHsType hs_ty)) $
+ tcFamTyPats fam_tc_shape pats (discardResult . (tcCheckLHsType hs_ty)) $
\tvs' pats' res_kind ->
- do { rhs_ty <- tcCheckLHsType hs_ty res_kind
+ do { checkTc (fam_tc_name == eqn_tc_name)
+ (wrongTyFamName fam_tc_name eqn_tc_name)
+ ; rhs_ty <- tcCheckLHsType hs_ty res_kind
; rhs_ty <- zonkTcTypeToType emptyZonkEnv rhs_ty
; traceTc "tcTyFamInstEqn" (ppr fam_tc_name <+> ppr tvs')
-- don't print out the pats here, as they might be zonked inside the knot
@@ -946,6 +960,19 @@ type families.
tcFamTyPats type checks the patterns, zonks, and then calls thing_inside
to generate a desugaring. It is used during type-checking (not kind-checking).
+Note [Type-checking type patterns]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When typechecking the patterns of a family instance declaration, we can't
+rely on using the family TyCon, because this is sometimes called
+from within a type-checking knot. (Specifically for closed type families.)
+The type FamTyConShape gives just enough information to do the job.
+
+The "arity" field of FamTyConShape is the *visible* arity of the family
+type constructor, i.e. what the users sees and writes, not including kind
+arguments.
+
+See also Note [tc_fam_ty_pats vs tcFamTyPats]
+
Note [Failing early in kcDataDefn]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We need to use checkNoErrs when calling kcConDecl. This is because kcConDecl
@@ -960,15 +987,18 @@ two bad things could happen:
\begin{code}
-----------------
--- Note that we can't use the family TyCon, because this is sometimes called
--- from within a type-checking knot. So, we ask our callers to do a little more
--- work.
--- See Note [tc_fam_ty_pats vs tcFamTyPats]
-tc_fam_ty_pats :: Name -- of the family TyCon
- -> Kind -- of the family TyCon
+type FamTyConShape = (Name, Arity, Kind) -- See Note [Type-checking type patterns]
+
+famTyConShape :: TyCon -> FamTyConShape
+famTyConShape fam_tc
+ = ( tyConName fam_tc
+ , length (filterOut isKindVar (tyConTyVars fam_tc))
+ , tyConKind fam_tc )
+
+tc_fam_ty_pats :: FamTyConShape
-> HsWithBndrs [LHsType Name] -- Patterns
- -> (TcKind -> TcM ()) -- Kind checker for RHS
- -- result is ignored
+ -> (TcKind -> TcM ()) -- Kind checker for RHS
+ -- result is ignored
-> TcM ([Kind], [Type], Kind)
-- Check the type patterns of a type or data family instance
-- type instance F =
@@ -981,7 +1011,7 @@ tc_fam_ty_pats :: Name -- of the family TyCon
-- In that case, the type variable 'a' will *already be in scope*
-- (and, if C is poly-kinded, so will its kind parameter).
-tc_fam_ty_pats fam_tc_name kind
+tc_fam_ty_pats (name, arity, kind)
(HsWB { hswb_cts = arg_pats, hswb_kvs = kvars, hswb_tvs = tvars })
kind_checker
= do { let (fam_kvs, fam_body) = splitForAllTys kind
@@ -993,9 +1023,8 @@ tc_fam_ty_pats fam_tc_name kind
-- Note that we don't have enough information at hand to do a full check,
-- as that requires the full declared arity of the family, which isn't
-- nearby.
- ; let max_args = length (fst $ splitKindFunTys fam_body)
- ; checkTc (length arg_pats <= max_args) $
- wrongNumberOfParmsErrTooMany max_args
+ ; checkTc (length arg_pats == arity) $
+ wrongNumberOfParmsErr arity
-- Instantiate with meta kind vars
; fam_arg_kinds <- mapM (const newMetaKindVar) fam_kvs
@@ -1010,20 +1039,21 @@ tc_fam_ty_pats fam_tc_name kind
-- See Note [Quantifying over family patterns]
; typats <- tcHsTyVarBndrs hs_tvs $ \ _ ->
do { kind_checker res_kind
- ; tcHsArgTys (quotes (ppr fam_tc_name)) arg_pats arg_kinds }
+ ; tcHsArgTys (quotes (ppr name)) arg_pats arg_kinds }
; return (fam_arg_kinds, typats, res_kind) }
-- See Note [tc_fam_ty_pats vs tcFamTyPats]
-tcFamTyPats :: Name -- of the family ToCon
- -> Kind -- of the family TyCon
+tcFamTyPats :: FamTyConShape
-> HsWithBndrs [LHsType Name] -- patterns
-> (TcKind -> TcM ()) -- kind-checker for RHS
- -> ([TKVar] -> [TcType] -> Kind -> TcM a)
+ -> ([TKVar] -- Kind and type variables
+ -> [TcType] -- Kind and type arguments
+ -> Kind -> TcM a)
-> TcM a
-tcFamTyPats fam_tc_name kind pats kind_checker thing_inside
+tcFamTyPats fam_shape@(name,_,_) pats kind_checker thing_inside
= do { (fam_arg_kinds, typats, res_kind)
- <- tc_fam_ty_pats fam_tc_name kind pats kind_checker
+ <- tc_fam_ty_pats fam_shape pats kind_checker
; let all_args = fam_arg_kinds ++ typats
-- Find free variables (after zonking) and turn
@@ -1037,7 +1067,7 @@ tcFamTyPats fam_tc_name kind pats kind_checker thing_inside
; all_args' <- zonkTcTypeToTypes ze all_args
; res_kind' <- zonkTcTypeToType ze res_kind
- ; traceTc "tcFamTyPats" (ppr fam_tc_name)
+ ; traceTc "tcFamTyPats" (ppr name)
-- don't print out too much, as we might be in the knot
; tcExtendTyVarEnv qtkvs' $
thing_inside qtkvs' all_args' res_kind' }
@@ -1098,11 +1128,11 @@ dataDeclChecks :: Name -> NewOrData -> ThetaType -> [LConDecl Name] -> TcM Bool
dataDeclChecks tc_name new_or_data stupid_theta cons
= do { -- Check that we don't use GADT syntax in H98 world
gadtSyntax_ok <- xoptM Opt_GADTSyntax
- ; let h98_syntax = consUseH98Syntax cons
- ; checkTc (gadtSyntax_ok || h98_syntax) (badGadtDecl tc_name)
+ ; let gadt_syntax = consUseGadtSyntax cons
+ ; checkTc (gadtSyntax_ok || not gadt_syntax) (badGadtDecl tc_name)
-- Check that the stupid theta is empty for a GADT-style declaration
- ; checkTc (null stupid_theta || h98_syntax) (badStupidTheta tc_name)
+ ; checkTc (null stupid_theta || not gadt_syntax) (badStupidTheta tc_name)
-- Check that a newtype has exactly one constructor
-- Do this before checking for empty data decls, so that
@@ -1116,13 +1146,13 @@ dataDeclChecks tc_name new_or_data stupid_theta cons
; is_boot <- tcIsHsBoot -- Are we compiling an hs-boot file?
; checkTc (not (null cons) || empty_data_decls || is_boot)
(emptyConDeclsErr tc_name)
- ; return h98_syntax }
+ ; return gadt_syntax }
-----------------------------------
-consUseH98Syntax :: [LConDecl a] -> Bool
-consUseH98Syntax (L _ (ConDecl { con_res = ResTyGADT _ }) : _) = False
-consUseH98Syntax _ = True
+consUseGadtSyntax :: [LConDecl a] -> Bool
+consUseGadtSyntax (L _ (ConDecl { con_res = ResTyGADT _ }) : _) = True
+consUseGadtSyntax _ = False
-- All constructors have same shape
-----------------------------------
@@ -1153,7 +1183,7 @@ tcConDecl new_or_data rep_tycon tmpl_tvs res_tmpl -- Data types
(arg_tys, stricts) = unzip btys
; return (ctxt, arg_tys, res_ty, is_infix, field_lbls, stricts) }
- -- Generalise the kind variables (returning quantifed TcKindVars)
+ -- Generalise the kind variables (returning quantified TcKindVars)
-- and quantify the type variables (substituting their kinds)
-- REMEMBER: 'tkvs' are:
-- ResTyH98: the *existential* type variables only
@@ -1339,10 +1369,24 @@ since GADTs are not kind indexed.
Validity checking is done once the mutually-recursive knot has been
tied, so we can look at things freely.
+Note [Abort when superclass cycle is detected]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We must avoid doing the ambiguity check when there are already errors accumulated.
+This is because one of the errors may be a superclass cycle, and superclass cycles
+cause canonicalization to loop. Here is a representative example:
+
+ class D a => C a where
+ meth :: D a => ()
+ class C a => D a
+
+This fixes Trac #9415.
+
\begin{code}
checkClassCycleErrs :: Class -> TcM ()
checkClassCycleErrs cls
- = unless (null cls_cycles) $ mapM_ recClsErr cls_cycles
+ = unless (null cls_cycles) $
+ do { mapM_ recClsErr cls_cycles
+ ; failM } -- See Note [Abort when superclass cycle is detected]
where cls_cycles = calcClassCycles cls
checkValidTyCl :: TyThing -> TcM ()
@@ -1463,8 +1507,8 @@ checkValidClosedCoAxiom (CoAxiom { co_ax_branches = branches, co_ax_tc = tc })
-- ones and hence is inaccessible
check_accessibility prev_branches cur_branch
= do { when (cur_branch `isDominatedBy` prev_branches) $
- setSrcSpan (coAxBranchSpan cur_branch) $
- addErrTc $ inaccessibleCoAxBranch tc cur_branch
+ addWarnAt (coAxBranchSpan cur_branch) $
+ inaccessibleCoAxBranch tc cur_branch
; return (cur_branch : prev_branches) }
checkFieldCompat :: Name -> DataCon -> DataCon -> TyVarSet
@@ -1481,16 +1525,19 @@ checkValidDataCon :: DynFlags -> Bool -> TyCon -> DataCon -> TcM ()
checkValidDataCon dflags existential_ok tc con
= setSrcSpan (srcLocSpan (getSrcLoc con)) $
addErrCtxt (dataConCtxt con) $
- do { traceTc "checkValidDataCon" (ppr con $$ ppr tc)
-
- -- Check that the return type of the data constructor
+ do { -- Check that the return type of the data constructor
-- matches the type constructor; eg reject this:
-- data T a where { MkT :: Bogus a }
-- c.f. Note [Check role annotations in a second pass]
-- and Note [Checking GADT return types]
- ; let tc_tvs = tyConTyVars tc
+ let tc_tvs = tyConTyVars tc
res_ty_tmpl = mkFamilyTyConApp tc (mkTyVarTys tc_tvs)
orig_res_ty = dataConOrigResTy con
+ ; traceTc "checkValidDataCon" (vcat
+ [ ppr con, ppr tc, ppr tc_tvs
+ , ppr res_ty_tmpl <+> dcolon <+> ppr (typeKind res_ty_tmpl)
+ , ppr orig_res_ty <+> dcolon <+> ppr (typeKind orig_res_ty)])
+
; checkTc (isJust (tcMatchTy (mkVarSet tc_tvs)
res_ty_tmpl
orig_res_ty))
@@ -1578,16 +1625,19 @@ checkValidClass cls
; nullary_type_classes <- xoptM Opt_NullaryTypeClasses
; fundep_classes <- xoptM Opt_FunctionalDependencies
- -- Check that the class is unary, unless multiparameter or
- -- nullary type classes are enabled
- ; checkTc (nullary_type_classes || notNull tyvars) (nullaryClassErr cls)
- ; checkTc (multi_param_type_classes || arity <= 1) (classArityErr cls)
+ -- Check that the class is unary, unless multiparameter type classes
+ -- are enabled; also recognize deprecated nullary type classes
+ -- extension (subsumed by multiparameter type classes, Trac #8993)
+ ; checkTc (multi_param_type_classes || arity == 1 ||
+ (nullary_type_classes && arity == 0))
+ (classArityErr arity cls)
; checkTc (fundep_classes || null fundeps) (classFunDepsErr cls)
-- Check the super-classes
; checkValidTheta (ClassSCCtxt (className cls)) theta
-- Now check for cyclic superclasses
+ -- If there are superclass cycles, checkClassCycleErrs bails.
; checkClassCycleErrs cls
-- Check the class operations
@@ -1618,7 +1668,7 @@ checkValidClass cls
-- since there is no possible ambiguity
; let grown_tyvars = growThetaTyVars theta (mkVarSet tyvars)
; checkTc (arity == 0 || tyVarsOfType tau `intersectsVarSet` grown_tyvars)
- (noClassTyVarErr cls sel_id)
+ (noClassTyVarErr cls (ptext (sLit "class method") <+> quotes (ppr sel_id)))
; case dm of
GenDefMeth dm_name -> do { dm_id <- tcLookupId dm_name
@@ -1640,11 +1690,10 @@ checkValidClass cls
-- in the context of a for-all must mention at least one quantified
-- type variable. What a mess!
- check_at_defs (fam_tc, defs)
- = tcAddDefaultAssocDeclCtxt (tyConName fam_tc) $
- mapM_ (checkValidTyFamInst mb_clsinfo fam_tc) defs
-
- mb_clsinfo = Just (cls, mkVarEnv [ (tv, mkTyVarTy tv) | tv <- tyvars ])
+ check_at_defs (ATI fam_tc _)
+ = do { traceTc "check-at" (ppr fam_tc $$ ppr (tyConTyVars fam_tc) $$ ppr tyvars)
+ ; checkTc (any (`elem` tyvars) (tyConTyVars fam_tc))
+ (noClassTyVarErr cls (ptext (sLit "associated type") <+> quotes (ppr fam_tc))) }
checkFamFlag :: Name -> TcM ()
-- Check that we don't use families without -XTypeFamilies
@@ -1669,9 +1718,9 @@ checkValidRoleAnnots :: RoleAnnots -> TyThing -> TcM ()
checkValidRoleAnnots role_annots thing
= case thing of
{ ATyCon tc
- | isSynTyCon tc -> check_no_roles
- | isFamilyTyCon tc -> check_no_roles
- | isAlgTyCon tc -> check_roles
+ | isTypeSynonymTyCon tc -> check_no_roles
+ | isFamilyTyCon tc -> check_no_roles
+ | isAlgTyCon tc -> check_roles
where
name = tyConName tc
@@ -1694,6 +1743,15 @@ checkValidRoleAnnots role_annots thing
; checkTc (type_vars `equalLength` the_role_annots)
(wrongNumberOfRoles type_vars decl)
; _ <- zipWith3M checkRoleAnnot type_vars the_role_annots type_roles
+ -- Representational or phantom roles for class parameters
+ -- quickly lead to incoherence. So, we require
+ -- IncoherentInstances to have them. See #8773.
+ ; incoherent_roles_ok <- xoptM Opt_IncoherentInstances
+ ; checkTc ( incoherent_roles_ok
+ || (not $ isClassTyCon tc)
+ || (all (== Nominal) type_roles))
+ incoherentRoles
+
; lint <- goptM Opt_DoCoreLinting
; when lint $ checkValidRoles tc }
@@ -1786,7 +1844,7 @@ checkValidRoles tc
mkDefaultMethodIds :: [TyThing] -> [Id]
-- See Note [Default method Ids and Template Haskell]
mkDefaultMethodIds things
- = [ mkExportedLocalId dm_name (idType sel_id)
+ = [ mkExportedLocalId VanillaId dm_name (idType sel_id)
| ATyCon tc <- things
, Just cls <- [tyConClass_maybe tc]
, (sel_id, DefMeth dm_name) <- classOpItems cls ]
@@ -1823,11 +1881,10 @@ mkRecSelBinds tycons
mkRecSelBind :: (TyCon, FieldLabel) -> (LSig Name, LHsBinds Name)
mkRecSelBind (tycon, sel_name)
- = (L loc (IdSig sel_id), unitBag (Generated, L loc sel_bind))
+ = (L loc (IdSig sel_id), unitBag (L loc sel_bind))
where
loc = getSrcSpan sel_name
- sel_id = Var.mkExportedLocalVar rec_details sel_name
- sel_ty vanillaIdInfo
+ sel_id = mkExportedLocalId rec_details sel_name sel_ty
rec_details = RecSelId { sel_tycon = tycon, sel_naughty = is_naughty }
-- Find a representative constructor, con1
@@ -1852,8 +1909,10 @@ mkRecSelBind (tycon, sel_name)
-- Make the binding: sel (C2 { fld = x }) = x
-- sel (C7 { fld = x }) = x
-- where cons_w_field = [C2,C7]
- sel_bind | is_naughty = mkTopFunBind sel_lname [mkSimpleMatch [] unit_rhs]
- | otherwise = mkTopFunBind sel_lname (map mk_match cons_w_field ++ deflt)
+ sel_bind = mkTopFunBind Generated sel_lname alts
+ where
+ alts | is_naughty = [mkSimpleMatch [] unit_rhs]
+ | otherwise = map mk_match cons_w_field ++ deflt
mk_match con = mkSimpleMatch [L loc (mk_sel_pat con)]
(L loc (HsVar field_var))
mk_sel_pat con = ConPatIn (L loc (getName con)) (RecCon rec_fields)
@@ -1991,13 +2050,6 @@ gotten by appying the eq_spec to the univ_tvs of the data con.
%************************************************************************
\begin{code}
-tcAddDefaultAssocDeclCtxt :: Name -> TcM a -> TcM a
-tcAddDefaultAssocDeclCtxt name thing_inside
- = addErrCtxt ctxt thing_inside
- where
- ctxt = hsep [ptext (sLit "In the type synonym instance default declaration for"),
- quotes (ppr name)]
-
tcAddTyFamInstCtxt :: TyFamInstDecl Name -> TcM a -> TcM a
tcAddTyFamInstCtxt decl
= tcAddFamInstCtxt (ptext (sLit "type instance")) (tyFamInstDeclName decl)
@@ -2040,26 +2092,26 @@ classOpCtxt :: Var -> Type -> SDoc
classOpCtxt sel_id tau = sep [ptext (sLit "When checking the class method:"),
nest 2 (pprPrefixOcc sel_id <+> dcolon <+> ppr tau)]
-nullaryClassErr :: Class -> SDoc
-nullaryClassErr cls
- = vcat [ptext (sLit "No parameters for class") <+> quotes (ppr cls),
- parens (ptext (sLit "Use NullaryTypeClasses to allow no-parameter classes"))]
-
-classArityErr :: Class -> SDoc
-classArityErr cls
- = vcat [ptext (sLit "Too many parameters for class") <+> quotes (ppr cls),
- parens (ptext (sLit "Use MultiParamTypeClasses to allow multi-parameter classes"))]
+classArityErr :: Int -> Class -> SDoc
+classArityErr n cls
+ | n == 0 = mkErr "No" "no-parameter"
+ | otherwise = mkErr "Too many" "multi-parameter"
+ where
+ mkErr howMany allowWhat =
+ vcat [ptext (sLit $ howMany ++ " parameters for class") <+> quotes (ppr cls),
+ parens (ptext (sLit $ "Use MultiParamTypeClasses to allow "
+ ++ allowWhat ++ " classes"))]
classFunDepsErr :: Class -> SDoc
classFunDepsErr cls
= vcat [ptext (sLit "Fundeps in class") <+> quotes (ppr cls),
parens (ptext (sLit "Use FunctionalDependencies to allow fundeps"))]
-noClassTyVarErr :: Class -> Var -> SDoc
-noClassTyVarErr clas op
- = sep [ptext (sLit "The class method") <+> quotes (ppr op),
- ptext (sLit "mentions none of the type variables of the class") <+>
- ppr clas <+> hsep (map ppr (classTyVars clas))]
+noClassTyVarErr :: Class -> SDoc -> SDoc
+noClassTyVarErr clas what
+ = sep [ptext (sLit "The") <+> what,
+ ptext (sLit "mentions none of the type or kind variables of the class") <+>
+ quotes (ppr clas <+> hsep (map ppr (classTyVars clas)))]
recSynErr :: [LTyClDecl Name] -> TcRn ()
recSynErr syn_decls
@@ -2138,20 +2190,20 @@ wrongKindOfFamily family
| isAlgTyCon family = ptext (sLit "data type")
| otherwise = pprPanic "wrongKindOfFamily" (ppr family)
-wrongNumberOfParmsErrTooMany :: Arity -> SDoc
-wrongNumberOfParmsErrTooMany max_args
- = ptext (sLit "Number of parameters must match family declaration; expected no more than")
+wrongNumberOfParmsErr :: Arity -> SDoc
+wrongNumberOfParmsErr max_args
+ = ptext (sLit "Number of parameters must match family declaration; expected")
<+> ppr max_args
-wrongNamesInInstGroup :: Name -> Name -> SDoc
-wrongNamesInInstGroup first cur
- = ptext (sLit "Mismatched type names in closed type family declaration.") $$
- ptext (sLit "First name was") <+>
- (ppr first) <> (ptext (sLit "; this one is")) <+> (ppr cur)
+wrongTyFamName :: Name -> Name -> SDoc
+wrongTyFamName fam_tc_name eqn_tc_name
+ = hang (ptext (sLit "Mismatched type name in type family instance."))
+ 2 (vcat [ ptext (sLit "Expected:") <+> ppr fam_tc_name
+ , ptext (sLit " Actual:") <+> ppr eqn_tc_name ])
inaccessibleCoAxBranch :: TyCon -> CoAxBranch -> SDoc
inaccessibleCoAxBranch tc fi
- = ptext (sLit "Inaccessible family instance equation:") $$
+ = ptext (sLit "Overlapped type family instance equation:") $$
(pprCoAxBranch tc fi)
badRoleAnnot :: Name -> Role -> Role -> SDoc
@@ -2180,6 +2232,11 @@ needXRoleAnnotations tc
= ptext (sLit "Illegal role annotation for") <+> ppr tc <> char ';' $$
ptext (sLit "did you intend to use RoleAnnotations?")
+incoherentRoles :: SDoc
+incoherentRoles = (text "Roles other than" <+> quotes (text "nominal") <+>
+ text "for class parameters can lead to incoherence.") $$
+ (text "Use IncoherentInstances to allow this; bad role found")
+
addTyThingCtxt :: TyThing -> TcM a -> TcM a
addTyThingCtxt thing
= addErrCtxt ctxt
@@ -2187,12 +2244,12 @@ addTyThingCtxt thing
name = getName thing
flav = case thing of
ATyCon tc
- | isClassTyCon tc -> ptext (sLit "class")
- | isSynFamilyTyCon tc -> ptext (sLit "type family")
- | isDataFamilyTyCon tc -> ptext (sLit "data family")
- | isSynTyCon tc -> ptext (sLit "type")
- | isNewTyCon tc -> ptext (sLit "newtype")
- | isDataTyCon tc -> ptext (sLit "data")
+ | isClassTyCon tc -> ptext (sLit "class")
+ | isSynFamilyTyCon tc -> ptext (sLit "type family")
+ | isDataFamilyTyCon tc -> ptext (sLit "data family")
+ | isTypeSynonymTyCon tc -> ptext (sLit "type")
+ | isNewTyCon tc -> ptext (sLit "newtype")
+ | isDataTyCon tc -> ptext (sLit "data")
_ -> pprTrace "addTyThingCtxt strange" (ppr thing)
empty
diff --git a/compiler/typecheck/TcTyDecls.lhs b/compiler/typecheck/TcTyDecls.lhs
index dbecf0a75402..262aa519b32a 100644
--- a/compiler/typecheck/TcTyDecls.lhs
+++ b/compiler/typecheck/TcTyDecls.lhs
@@ -9,7 +9,8 @@ This stuff is only used for source-code decls; it's recorded in interface
files for imported data types.
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -120,7 +121,7 @@ synTyConsOfType ty
mkSynEdges :: [LTyClDecl Name] -> [(LTyClDecl Name, Name, [Name])]
mkSynEdges syn_decls = [ (ldecl, name, nameSetToList fvs)
| ldecl@(L _ (SynDecl { tcdLName = L _ name
- , tcdFVs = fvs })) <- syn_decls ]
+ , tcdFVs = fvs })) <- syn_decls ]
calcSynCycles :: [LTyClDecl Name] -> [SCC (LTyClDecl Name)]
calcSynCycles = stronglyConnCompFromEdgedVertices . mkSynEdges
@@ -263,7 +264,7 @@ this for all newtypes, we'd get infinite types. So we figure out for
each newtype whether it is "recursive", and add a coercion if so. In
effect, we are trying to "cut the loops" by identifying a loop-breaker.
-2. Avoid infinite unboxing. This is nothing to do with newtypes.
+2. Avoid infinite unboxing. This has nothing to do with newtypes.
Suppose we have
data T = MkT Int T
f (MkT x t) = f t
@@ -371,7 +372,7 @@ calcRecFlags boot_details is_boot mrole_env tyclss
, rti_is_rec = is_rec }
where
rec_tycon_names = mkNameSet (map tyConName all_tycons)
- all_tycons = mapCatMaybes getTyCon tyclss
+ all_tycons = mapMaybe getTyCon tyclss
-- Recursion of newtypes/data types can happen via
-- the class TyCon, so tyclss includes the class tycons
@@ -672,10 +673,10 @@ initialRoleEnv is_boot annots = extendNameEnvList emptyNameEnv .
initialRoleEnv1 :: Bool -> RoleAnnots -> TyCon -> (Name, [Role])
initialRoleEnv1 is_boot annots_env tc
- | isFamilyTyCon tc = (name, map (const Nominal) tyvars)
- | isAlgTyCon tc
- || isSynTyCon tc = (name, default_roles)
- | otherwise = pprPanic "initialRoleEnv1" (ppr tc)
+ | isFamilyTyCon tc = (name, map (const Nominal) tyvars)
+ | isAlgTyCon tc = (name, default_roles)
+ | isTypeSynonymTyCon tc = (name, default_roles)
+ | otherwise = pprPanic "initialRoleEnv1" (ppr tc)
where name = tyConName tc
tyvars = tyConTyVars tc
(kvs, tvs) = span isKindVar tyvars
@@ -709,6 +710,8 @@ irTyCon tc
; unless (all (== Nominal) old_roles) $ -- also catches data families,
-- which don't want or need role inference
do { whenIsJust (tyConClass_maybe tc) (irClass tc_name)
+ ; addRoleInferenceInfo tc_name (tyConTyVars tc) $
+ mapM_ (irType emptyVarSet) (tyConStupidTheta tc) -- See #8958
; mapM_ (irDataCon tc_name) (visibleDataCons $ algTyConRhs tc) }}
| Just (SynonymTyCon ty) <- synTyConRhs_maybe tc
@@ -778,7 +781,7 @@ lookupRoles tc
Just roles -> return roles
Nothing -> return $ tyConRoles tc }
--- tries to update a role; won't even update a role "downwards"
+-- tries to update a role; won't ever update a role "downwards"
updateRole :: Role -> TyVar -> RoleM ()
updateRole role tv
= do { var_ns <- getVarNs
diff --git a/compiler/typecheck/TcType.lhs b/compiler/typecheck/TcType.lhs
index 55c37b9506c8..f12ec9d6d541 100644
--- a/compiler/typecheck/TcType.lhs
+++ b/compiler/typecheck/TcType.lhs
@@ -15,6 +15,8 @@ The "tc" prefix is for "TypeChecker", because the type checker
is the principal client.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcType (
--------------------------------
-- Types
@@ -93,8 +95,6 @@ module TcType (
isFFIPrimArgumentTy, -- :: DynFlags -> Type -> Bool
isFFIPrimResultTy, -- :: DynFlags -> Type -> Bool
isFFILabelTy, -- :: Type -> Bool
- isFFIDotnetTy, -- :: DynFlags -> Type -> Bool
- isFFIDotnetObjTy, -- :: Type -> Bool
isFFITy, -- :: Type -> Bool
isFunPtrTy, -- :: Type -> Bool
tcSplitIOType_maybe, -- :: Type -> Maybe Type
@@ -173,6 +173,7 @@ import Maybes
import ListSetOps
import Outputable
import FastString
+import ErrUtils( Validity(..), isValid )
import Data.IORef
import Control.Monad (liftM, ap)
@@ -245,34 +246,23 @@ checking. It's attached to mutable type variables only.
It's knot-tied back to Var.lhs. There is no reason in principle
why Var.lhs shouldn't actually have the definition, but it "belongs" here.
-
Note [Signature skolems]
~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
- x :: [a]
- y :: b
- (x,y,z) = ([y,z], z, head x)
-
-Here, x and y have type sigs, which go into the environment. We used to
-instantiate their types with skolem constants, and push those types into
-the RHS, so we'd typecheck the RHS with type
- ( [a*], b*, c )
-where a*, b* are skolem constants, and c is an ordinary meta type varible.
-
-The trouble is that the occurrences of z in the RHS force a* and b* to
-be the *same*, so we can't make them into skolem constants that don't unify
-with each other. Alas.
+ f :: forall a. [a] -> Int
+ f (x::b : xs) = 3
-One solution would be insist that in the above defn the programmer uses
-the same type variable in both type signatures. But that takes explanation.
-
-The alternative (currently implemented) is to have a special kind of skolem
-constant, SigTv, which can unify with other SigTvs. These are *not* treated
-as rigid for the purposes of GADTs. And they are used *only* for pattern
-bindings and mutually recursive function bindings. See the function
-TcBinds.tcInstSig, and its use_skols parameter.
+Here 'b' is a lexically scoped type variable, but it turns out to be
+the same as the skolem 'a'. So we have a special kind of skolem
+constant, SigTv, which can unify with other SigTvs. They are used
+*only* for pattern type signatures.
+Similarly consider
+ data T (a:k1) = MkT (S a)
+ data S (b:k2) = MkS (T b)
+When doing kind inference on {S,T} we don't want *skolems* for k1,k2,
+because they end up unifying; we want those SigTvs again.
\begin{code}
-- A TyVarDetails is inside a TyVar
@@ -489,7 +479,7 @@ pprTcTyVarDetails (MetaTv { mtv_info = info, mtv_untch = untch })
pprUserTypeCtxt :: UserTypeCtxt -> SDoc
pprUserTypeCtxt (InfSigCtxt n) = ptext (sLit "the inferred type for") <+> quotes (ppr n)
pprUserTypeCtxt (FunSigCtxt n) = ptext (sLit "the type signature for") <+> quotes (ppr n)
-pprUserTypeCtxt (RuleSigCtxt n) = ptext (sLit "a RULE for") <+> quotes (ppr n)
+pprUserTypeCtxt (RuleSigCtxt n) = ptext (sLit "a RULE for") <+> quotes (ppr n)
pprUserTypeCtxt ExprSigCtxt = ptext (sLit "an expression type signature")
pprUserTypeCtxt (ConArgCtxt c) = ptext (sLit "the type of the constructor") <+> quotes (ppr c)
pprUserTypeCtxt (TySynCtxt c) = ptext (sLit "the RHS of the type synonym") <+> quotes (ppr c)
@@ -745,7 +735,7 @@ mkTcEqPred :: TcType -> TcType -> Type
mkTcEqPred ty1 ty2
= mkTyConApp eqTyCon [k, ty1, ty2]
where
- k = defaultKind (typeKind ty1)
+ k = typeKind ty1
\end{code}
@isTauTy@ tests for nested for-alls. It should not be called on a boxy type.
@@ -945,7 +935,7 @@ tcGetTyVar :: String -> Type -> TyVar
tcGetTyVar msg ty = expectJust msg (tcGetTyVar_maybe ty)
tcIsTyVarTy :: Type -> Bool
-tcIsTyVarTy ty = maybeToBool (tcGetTyVar_maybe ty)
+tcIsTyVarTy ty = isJust (tcGetTyVar_maybe ty)
-----------------------
tcSplitDFunTy :: Type -> ([TyVar], [Type], Class, [Type])
@@ -972,7 +962,7 @@ tcInstHeadTyNotSynonym :: Type -> Bool
-- are transparent, so we need a special function here
tcInstHeadTyNotSynonym ty
= case ty of
- TyConApp tc _ -> not (isSynTyCon tc)
+ TyConApp tc _ -> not (isTypeSynonymTyCon tc)
_ -> True
tcInstHeadTyAppAllTyVars :: Type -> Bool
@@ -992,7 +982,7 @@ tcInstHeadTyAppAllTyVars ty
-- and that each is distinct
ok tys = equalLength tvs tys && hasNoDups tvs
where
- tvs = mapCatMaybes get_tv tys
+ tvs = mapMaybe get_tv tys
get_tv (TyVarTy tv) = Just tv -- through synonyms
get_tv _ = Nothing
@@ -1018,7 +1008,8 @@ tcEqType ty1 ty2
| Just t2' <- tcView t2 = go env t1 t2'
go env (TyVarTy tv1) (TyVarTy tv2) = rnOccL env tv1 == rnOccR env tv2
go _ (LitTy lit1) (LitTy lit2) = lit1 == lit2
- go env (ForAllTy tv1 t1) (ForAllTy tv2 t2) = go (rnBndr2 env tv1 tv2) t1 t2
+ go env (ForAllTy tv1 t1) (ForAllTy tv2 t2) = go env (tyVarKind tv1) (tyVarKind tv2)
+ && go (rnBndr2 env tv1 tv2) t1 t2
go env (AppTy s1 t1) (AppTy s2 t2) = go env s1 s2 && go env t1 t2
go env (FunTy s1 t1) (FunTy s2 t2) = go env s1 s2 && go env t1 t2
go env (TyConApp tc1 ts1) (TyConApp tc2 ts2) = (tc1 == tc2) && gos env ts1 ts2
@@ -1037,7 +1028,8 @@ pickyEqType ty1 ty2
init_env = mkRnEnv2 (mkInScopeSet (tyVarsOfType ty1 `unionVarSet` tyVarsOfType ty2))
go env (TyVarTy tv1) (TyVarTy tv2) = rnOccL env tv1 == rnOccR env tv2
go _ (LitTy lit1) (LitTy lit2) = lit1 == lit2
- go env (ForAllTy tv1 t1) (ForAllTy tv2 t2) = go (rnBndr2 env tv1 tv2) t1 t2
+ go env (ForAllTy tv1 t1) (ForAllTy tv2 t2) = go env (tyVarKind tv1) (tyVarKind tv2)
+ && go (rnBndr2 env tv1 tv2) t1 t2
go env (AppTy s1 t1) (AppTy s2 t2) = go env s1 s2 && go env t1 t2
go env (FunTy s1 t1) (FunTy s2 t2) = go env s1 s2 && go env t1 t2
go env (TyConApp tc1 ts1) (TyConApp tc2 ts2) = (tc1 == tc2) && gos env ts1 ts2
@@ -1427,25 +1419,25 @@ tcSplitIOType_maybe ty
isFFITy :: Type -> Bool
-- True for any TyCon that can possibly be an arg or result of an FFI call
-isFFITy ty = checkRepTyCon legalFFITyCon ty
+isFFITy ty = isValid (checkRepTyCon legalFFITyCon ty empty)
-isFFIArgumentTy :: DynFlags -> Safety -> Type -> Bool
+isFFIArgumentTy :: DynFlags -> Safety -> Type -> Validity
-- Checks for valid argument type for a 'foreign import'
isFFIArgumentTy dflags safety ty
- = checkRepTyCon (legalOutgoingTyCon dflags safety) ty
+ = checkRepTyCon (legalOutgoingTyCon dflags safety) ty empty
-isFFIExternalTy :: Type -> Bool
+isFFIExternalTy :: Type -> Validity
-- Types that are allowed as arguments of a 'foreign export'
-isFFIExternalTy ty = checkRepTyCon legalFEArgTyCon ty
+isFFIExternalTy ty = checkRepTyCon legalFEArgTyCon ty empty
-isFFIImportResultTy :: DynFlags -> Type -> Bool
+isFFIImportResultTy :: DynFlags -> Type -> Validity
isFFIImportResultTy dflags ty
- = checkRepTyCon (legalFIResultTyCon dflags) ty
+ = checkRepTyCon (legalFIResultTyCon dflags) ty empty
-isFFIExportResultTy :: Type -> Bool
-isFFIExportResultTy ty = checkRepTyCon legalFEResultTyCon ty
+isFFIExportResultTy :: Type -> Validity
+isFFIExportResultTy ty = checkRepTyCon legalFEResultTyCon ty empty
-isFFIDynTy :: Type -> Type -> Bool
+isFFIDynTy :: Type -> Type -> Validity
-- The type in a foreign import dynamic must be Ptr, FunPtr, or a newtype of
-- either, and the wrapped function type must be equal to the given type.
-- We assume that all types have been run through normalizeFfiType, so we don't
@@ -1457,60 +1449,54 @@ isFFIDynTy expected ty
| Just (tc, [ty']) <- splitTyConApp_maybe ty
, tyConUnique tc `elem` [ptrTyConKey, funPtrTyConKey]
, eqType ty' expected
- = True
+ = IsValid
| otherwise
- = False
+ = NotValid (vcat [ ptext (sLit "Expected: Ptr/FunPtr") <+> pprParendType expected <> comma
+ , ptext (sLit " Actual:") <+> ppr ty ])
-isFFILabelTy :: Type -> Bool
+isFFILabelTy :: Type -> Validity
-- The type of a foreign label must be Ptr, FunPtr, or a newtype of either.
-isFFILabelTy = checkRepTyConKey [ptrTyConKey, funPtrTyConKey]
+isFFILabelTy ty = checkRepTyCon ok ty extra
+ where
+ ok tc = tc `hasKey` funPtrTyConKey || tc `hasKey` ptrTyConKey
+ extra = ptext (sLit "A foreign-imported address (via &foo) must have type (Ptr a) or (FunPtr a)")
-isFFIPrimArgumentTy :: DynFlags -> Type -> Bool
+isFFIPrimArgumentTy :: DynFlags -> Type -> Validity
-- Checks for valid argument type for a 'foreign import prim'
-- Currently they must all be simple unlifted types, or the well-known type
-- Any, which can be used to pass the address to a Haskell object on the heap to
-- the foreign function.
isFFIPrimArgumentTy dflags ty
- = isAnyTy ty || checkRepTyCon (legalFIPrimArgTyCon dflags) ty
+ | isAnyTy ty = IsValid
+ | otherwise = checkRepTyCon (legalFIPrimArgTyCon dflags) ty empty
-isFFIPrimResultTy :: DynFlags -> Type -> Bool
+isFFIPrimResultTy :: DynFlags -> Type -> Validity
-- Checks for valid result type for a 'foreign import prim'
-- Currently it must be an unlifted type, including unboxed tuples.
isFFIPrimResultTy dflags ty
- = checkRepTyCon (legalFIPrimResultTyCon dflags) ty
-
-isFFIDotnetTy :: DynFlags -> Type -> Bool
-isFFIDotnetTy dflags ty
- = checkRepTyCon (\ tc -> (legalFIResultTyCon dflags tc ||
- isFFIDotnetObjTy ty || isStringTy ty)) ty
- -- NB: isStringTy used to look through newtypes, but
- -- it no longer does so. May need to adjust isFFIDotNetTy
- -- if we do want to look through newtypes.
-
-isFFIDotnetObjTy :: Type -> Bool
-isFFIDotnetObjTy ty
- = checkRepTyCon check_tc t_ty
- where
- (_, t_ty) = tcSplitForAllTys ty
- check_tc tc = getName tc == objectTyConName
+ = checkRepTyCon (legalFIPrimResultTyCon dflags) ty empty
isFunPtrTy :: Type -> Bool
-isFunPtrTy = checkRepTyConKey [funPtrTyConKey]
+isFunPtrTy ty = isValid (checkRepTyCon (`hasKey` funPtrTyConKey) ty empty)
-- normaliseFfiType gets run before checkRepTyCon, so we don't
-- need to worry about looking through newtypes or type functions
-- here; that's already been taken care of.
-checkRepTyCon :: (TyCon -> Bool) -> Type -> Bool
-checkRepTyCon check_tc ty
- | Just (tc, _) <- splitTyConApp_maybe ty
- = check_tc tc
- | otherwise
- = False
-
-checkRepTyConKey :: [Unique] -> Type -> Bool
--- Like checkRepTyCon, but just looks at the TyCon key
-checkRepTyConKey keys
- = checkRepTyCon (\tc -> tyConUnique tc `elem` keys)
+checkRepTyCon :: (TyCon -> Bool) -> Type -> SDoc -> Validity
+checkRepTyCon check_tc ty extra
+ = case splitTyConApp_maybe ty of
+ Just (tc, tys)
+ | isNewTyCon tc -> NotValid (hang msg 2 (mk_nt_reason tc tys $$ nt_fix))
+ | check_tc tc -> IsValid
+ | otherwise -> NotValid (msg $$ extra)
+ Nothing -> NotValid (quotes (ppr ty) <+> ptext (sLit "is not a data type") $$ extra)
+ where
+ msg = quotes (ppr ty) <+> ptext (sLit "cannot be marshalled in a foreign call")
+ mk_nt_reason tc tys
 | null tys = ptext (sLit "because its data constructor is not in scope")
 | otherwise = ptext (sLit "because the data constructor for")
+ <+> quotes (ppr tc) <+> ptext (sLit "is not in scope")
+ nt_fix = ptext (sLit "Possible fix: import the data constructor to bring it into scope")
\end{code}
Note [Foreign import dynamic]
@@ -1557,21 +1543,25 @@ legalOutgoingTyCon dflags _ tc
legalFFITyCon :: TyCon -> Bool
-- True for any TyCon that can possibly be an arg or result of an FFI call
legalFFITyCon tc
- = isUnLiftedTyCon tc || boxedMarshalableTyCon tc || tc == unitTyCon
+ | isUnLiftedTyCon tc = True
+ | tc == unitTyCon = True
+ | otherwise = boxedMarshalableTyCon tc
marshalableTyCon :: DynFlags -> TyCon -> Bool
marshalableTyCon dflags tc
- = (xopt Opt_UnliftedFFITypes dflags
+ | (xopt Opt_UnliftedFFITypes dflags
&& isUnLiftedTyCon tc
&& not (isUnboxedTupleTyCon tc)
&& case tyConPrimRep tc of -- Note [Marshalling VoidRep]
VoidRep -> False
_ -> True)
- || boxedMarshalableTyCon tc
+ = True
+ | otherwise
+ = boxedMarshalableTyCon tc
boxedMarshalableTyCon :: TyCon -> Bool
boxedMarshalableTyCon tc
- = getUnique tc `elem` [ intTyConKey, int8TyConKey, int16TyConKey
+ | getUnique tc `elem` [ intTyConKey, int8TyConKey, int16TyConKey
, int32TyConKey, int64TyConKey
, wordTyConKey, word8TyConKey, word16TyConKey
, word32TyConKey, word64TyConKey
@@ -1581,26 +1571,35 @@ boxedMarshalableTyCon tc
, stablePtrTyConKey
, boolTyConKey
]
+ = True
+
+ | otherwise = False
legalFIPrimArgTyCon :: DynFlags -> TyCon -> Bool
-- Check args of 'foreign import prim', only allow simple unlifted types.
-- Strictly speaking it is unnecessary to ban unboxed tuples here since
-- currently they're of the wrong kind to use in function args anyway.
legalFIPrimArgTyCon dflags tc
- = xopt Opt_UnliftedFFITypes dflags
+ | xopt Opt_UnliftedFFITypes dflags
&& isUnLiftedTyCon tc
&& not (isUnboxedTupleTyCon tc)
+ = True
+ | otherwise
+ = False
legalFIPrimResultTyCon :: DynFlags -> TyCon -> Bool
-- Check result type of 'foreign import prim'. Allow simple unlifted
-- types and also unboxed tuple result types '... -> (# , , #)'
legalFIPrimResultTyCon dflags tc
- = xopt Opt_UnliftedFFITypes dflags
+ | xopt Opt_UnliftedFFITypes dflags
&& isUnLiftedTyCon tc
&& (isUnboxedTupleTyCon tc
|| case tyConPrimRep tc of -- Note [Marshalling VoidRep]
VoidRep -> False
_ -> True)
+ = True
+ | otherwise
+ = False
\end{code}
Note [Marshalling VoidRep]
diff --git a/compiler/typecheck/TcTypeNats.hs b/compiler/typecheck/TcTypeNats.hs
index c19164bf4b04..37fc6e0cdbcd 100644
--- a/compiler/typecheck/TcTypeNats.hs
+++ b/compiler/typecheck/TcTypeNats.hs
@@ -12,9 +12,14 @@ import Coercion ( Role(..) )
import TcRnTypes ( Xi )
import CoAxiom ( CoAxiomRule(..), BuiltInSynFamily(..) )
import Name ( Name, BuiltInSyntax(..) )
-import TysWiredIn ( typeNatKind, mkWiredInTyConName
+import TysWiredIn ( typeNatKind, typeSymbolKind
+ , mkWiredInTyConName
, promotedBoolTyCon
, promotedFalseDataCon, promotedTrueDataCon
+ , promotedOrderingTyCon
+ , promotedLTDataCon
+ , promotedEQDataCon
+ , promotedGTDataCon
)
import TysPrim ( tyVarList, mkArrowKinds )
import PrelNames ( gHC_TYPELITS
@@ -23,6 +28,8 @@ import PrelNames ( gHC_TYPELITS
, typeNatExpTyFamNameKey
, typeNatLeqTyFamNameKey
, typeNatSubTyFamNameKey
+ , typeNatCmpTyFamNameKey
+ , typeSymbolCmpTyFamNameKey
)
import FastString ( FastString, fsLit )
import qualified Data.Map as Map
@@ -39,6 +46,8 @@ typeNatTyCons =
, typeNatExpTyCon
, typeNatLeqTyCon
, typeNatSubTyCon
+ , typeNatCmpTyCon
+ , typeSymbolCmpTyCon
]
typeNatAddTyCon :: TyCon
@@ -103,6 +112,45 @@ typeNatLeqTyCon =
, sfInteractInert = interactInertLeq
}
+typeNatCmpTyCon :: TyCon
+typeNatCmpTyCon =
+ mkSynTyCon name
+ (mkArrowKinds [ typeNatKind, typeNatKind ] orderingKind)
+ (take 2 $ tyVarList typeNatKind)
+ [Nominal,Nominal]
+ (BuiltInSynFamTyCon ops)
+ NoParentTyCon
+
+ where
+ name = mkWiredInTyConName UserSyntax gHC_TYPELITS (fsLit "CmpNat")
+ typeNatCmpTyFamNameKey typeNatCmpTyCon
+ ops = BuiltInSynFamily
+ { sfMatchFam = matchFamCmpNat
+ , sfInteractTop = interactTopCmpNat
+ , sfInteractInert = \_ _ _ _ -> []
+ }
+
+typeSymbolCmpTyCon :: TyCon
+typeSymbolCmpTyCon =
+ mkSynTyCon name
+ (mkArrowKinds [ typeSymbolKind, typeSymbolKind ] orderingKind)
+ (take 2 $ tyVarList typeSymbolKind)
+ [Nominal,Nominal]
+ (BuiltInSynFamTyCon ops)
+ NoParentTyCon
+
+ where
+ name = mkWiredInTyConName UserSyntax gHC_TYPELITS (fsLit "CmpSymbol")
+ typeSymbolCmpTyFamNameKey typeSymbolCmpTyCon
+ ops = BuiltInSynFamily
+ { sfMatchFam = matchFamCmpSymbol
+ , sfInteractTop = interactTopCmpSymbol
+ , sfInteractInert = \_ _ _ _ -> []
+ }
+
+
+
+
-- Make a binary built-in constructor of kind: Nat -> Nat -> Nat
mkTypeNatFunTyCon2 :: Name -> BuiltInSynFamily -> TyCon
@@ -127,6 +175,8 @@ axAddDef
, axMulDef
, axExpDef
, axLeqDef
+ , axCmpNatDef
+ , axCmpSymbolDef
, axAdd0L
, axAdd0R
, axMul0L
@@ -137,6 +187,8 @@ axAddDef
, axExp0R
, axExp1R
, axLeqRefl
+ , axCmpNatRefl
+ , axCmpSymbolRefl
, axLeq0L
, axSubDef
, axSub0R
@@ -154,6 +206,25 @@ axExpDef = mkBinAxiom "ExpDef" typeNatExpTyCon $
axLeqDef = mkBinAxiom "LeqDef" typeNatLeqTyCon $
\x y -> Just $ bool (x <= y)
+axCmpNatDef = mkBinAxiom "CmpNatDef" typeNatCmpTyCon
+ $ \x y -> Just $ ordering (compare x y)
+
+axCmpSymbolDef =
+ CoAxiomRule
+ { coaxrName = fsLit "CmpSymbolDef"
+ , coaxrTypeArity = 2
+ , coaxrAsmpRoles = []
+ , coaxrRole = Nominal
+ , coaxrProves = \ts cs ->
+ case (ts,cs) of
+ ([s,t],[]) ->
+ do x <- isStrLitTy s
+ y <- isStrLitTy t
+ return (mkTyConApp typeSymbolCmpTyCon [s,t] ===
+ ordering (compare x y))
+ _ -> Nothing
+ }
+
axSubDef = mkBinAxiom "SubDef" typeNatSubTyCon $
\x y -> fmap num (minus x y)
@@ -168,6 +239,10 @@ axExp1L = mkAxiom1 "Exp1L" $ \t -> (num 1 .^. t) === num 1
axExp0R = mkAxiom1 "Exp0R" $ \t -> (t .^. num 0) === num 1
axExp1R = mkAxiom1 "Exp1R" $ \t -> (t .^. num 1) === t
axLeqRefl = mkAxiom1 "LeqRefl" $ \t -> (t <== t) === bool True
+axCmpNatRefl = mkAxiom1 "CmpNatRefl"
+ $ \t -> (cmpNat t t) === ordering EQ
+axCmpSymbolRefl = mkAxiom1 "CmpSymbolRefl"
+ $ \t -> (cmpSymbol t t) === ordering EQ
axLeq0L = mkAxiom1 "Leq0L" $ \t -> (num 0 <== t) === bool True
typeNatCoAxiomRules :: Map.Map FastString CoAxiomRule
@@ -176,6 +251,8 @@ typeNatCoAxiomRules = Map.fromList $ map (\x -> (coaxrName x, x))
, axMulDef
, axExpDef
, axLeqDef
+ , axCmpNatDef
+ , axCmpSymbolDef
, axAdd0L
, axAdd0R
, axMul0L
@@ -186,6 +263,8 @@ typeNatCoAxiomRules = Map.fromList $ map (\x -> (coaxrName x, x))
, axExp0R
, axExp1R
, axLeqRefl
+ , axCmpNatRefl
+ , axCmpSymbolRefl
, axLeq0L
, axSubDef
]
@@ -211,6 +290,12 @@ s .^. t = mkTyConApp typeNatExpTyCon [s,t]
(<==) :: Type -> Type -> Type
s <== t = mkTyConApp typeNatLeqTyCon [s,t]
+cmpNat :: Type -> Type -> Type
+cmpNat s t = mkTyConApp typeNatCmpTyCon [s,t]
+
+cmpSymbol :: Type -> Type -> Type
+cmpSymbol s t = mkTyConApp typeSymbolCmpTyCon [s,t]
+
(===) :: Type -> Type -> Pair Type
x === y = Pair x y
@@ -232,6 +317,25 @@ isBoolLitTy tc =
| tc == promotedTrueDataCon -> return True
| otherwise -> Nothing
+orderingKind :: Kind
+orderingKind = mkTyConApp promotedOrderingTyCon []
+
+ordering :: Ordering -> Type
+ordering o =
+ case o of
+ LT -> mkTyConApp promotedLTDataCon []
+ EQ -> mkTyConApp promotedEQDataCon []
+ GT -> mkTyConApp promotedGTDataCon []
+
+isOrderingLitTy :: Type -> Maybe Ordering
+isOrderingLitTy tc =
+ do (tc1,[]) <- splitTyConApp_maybe tc
+ case () of
+ _ | tc1 == promotedLTDataCon -> return LT
+ | tc1 == promotedEQDataCon -> return EQ
+ | tc1 == promotedGTDataCon -> return GT
+ | otherwise -> Nothing
+
known :: (Integer -> Bool) -> TcType -> Bool
known p x = case isNumLitTy x of
Just a -> p a
@@ -258,6 +362,8 @@ mkBinAxiom str tc f =
_ -> Nothing
}
+
+
mkAxiom1 :: String -> (Type -> Pair Type) -> CoAxiomRule
mkAxiom1 str f =
CoAxiomRule
@@ -328,6 +434,25 @@ matchFamLeq [s,t]
mbY = isNumLitTy t
matchFamLeq _ = Nothing
+matchFamCmpNat :: [Type] -> Maybe (CoAxiomRule, [Type], Type)
+matchFamCmpNat [s,t]
+ | Just x <- mbX, Just y <- mbY =
+ Just (axCmpNatDef, [s,t], ordering (compare x y))
+ | tcEqType s t = Just (axCmpNatRefl, [s], ordering EQ)
+ where mbX = isNumLitTy s
+ mbY = isNumLitTy t
+matchFamCmpNat _ = Nothing
+
+matchFamCmpSymbol :: [Type] -> Maybe (CoAxiomRule, [Type], Type)
+matchFamCmpSymbol [s,t]
+ | Just x <- mbX, Just y <- mbY =
+ Just (axCmpSymbolDef, [s,t], ordering (compare x y))
+ | tcEqType s t = Just (axCmpSymbolRefl, [s], ordering EQ)
+ where mbX = isStrLitTy s
+ mbY = isStrLitTy t
+matchFamCmpSymbol _ = Nothing
+
+
{-------------------------------------------------------------------------------
Interact with axioms
-------------------------------------------------------------------------------}
@@ -415,6 +540,17 @@ interactTopLeq [s,t] r
mbZ = isBoolLitTy r
interactTopLeq _ _ = []
+interactTopCmpNat :: [Xi] -> Xi -> [Pair Type]
+interactTopCmpNat [s,t] r
+ | Just EQ <- isOrderingLitTy r = [ s === t ]
+interactTopCmpNat _ _ = []
+
+interactTopCmpSymbol :: [Xi] -> Xi -> [Pair Type]
+interactTopCmpSymbol [s,t] r
+ | Just EQ <- isOrderingLitTy r = [ s === t ]
+interactTopCmpSymbol _ _ = []
+
+
{-------------------------------------------------------------------------------
@@ -466,6 +602,10 @@ interactInertLeq _ _ _ _ = []
+
+
+
+
{- -----------------------------------------------------------------------------
These inverse functions are used for simplifying propositions using
concrete natural numbers.
diff --git a/compiler/typecheck/TcUnify.lhs b/compiler/typecheck/TcUnify.lhs
index 94b6aebeb543..ef06ddd2634c 100644
--- a/compiler/typecheck/TcUnify.lhs
+++ b/compiler/typecheck/TcUnify.lhs
@@ -6,7 +6,8 @@
Type subsumption and unification
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -824,8 +825,8 @@ uUnfilledVars origin swapped tv1 details1 tv2 details2
-- k1 = k2, so we are free to update either way
(EQ, MetaTv { mtv_info = i1, mtv_ref = ref1 },
MetaTv { mtv_info = i2, mtv_ref = ref2 })
- | nicer_to_update_tv1 i1 i2 -> updateMeta tv1 ref1 ty2
- | otherwise -> updateMeta tv2 ref2 ty1
+ | nicer_to_update_tv1 tv1 i1 i2 -> updateMeta tv1 ref1 ty2
+ | otherwise -> updateMeta tv2 ref2 ty1
(EQ, MetaTv { mtv_ref = ref1 }, _) -> updateMeta tv1 ref1 ty2
(EQ, _, MetaTv { mtv_ref = ref2 }) -> updateMeta tv2 ref2 ty1
@@ -838,9 +839,10 @@ uUnfilledVars origin swapped tv1 details1 tv2 details2
ty1 = mkTyVarTy tv1
ty2 = mkTyVarTy tv2
- nicer_to_update_tv1 _ SigTv = True
- nicer_to_update_tv1 SigTv _ = False
- nicer_to_update_tv1 _ _ = isSystemName (Var.varName tv1)
+nicer_to_update_tv1 :: TcTyVar -> MetaInfo -> MetaInfo -> Bool
+nicer_to_update_tv1 _ _ SigTv = True
+nicer_to_update_tv1 _ SigTv _ = False
+nicer_to_update_tv1 tv1 _ _ = isSystemName (Var.varName tv1)
-- Try not to update SigTvs; and try to update sys-y type
-- variables in preference to ones gotten (say) by
-- instantiating a polymorphic function with a user-written
@@ -1069,6 +1071,31 @@ one of argTypeKind or openTypeKind.
The situation is different in the core of the compiler, where we are perfectly
happy to have types of kind Constraint on either end of an arrow.
+Note [Kind variables can be untouchable]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We must use the careful function lookupTcTyVar to see if a kind
+variable is filled or unifiable. It checks for touchablity, and kind
+variables can certainly be untouchable --- for example the variable
+might be bound outside an enclosing existential pattern match that
+binds an inner kind variable, which we don't want to escape outside.
+
+This, or something closely related, was the cause of Trac #8985.
+
+Note [Unifying kind variables]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Rather hackily, kind variables can be TyVars not just TcTyVars.
+Main reason is in
+ data instance T (D (x :: k)) = ...con-decls...
+Here we bring into scope a kind variable 'k', and use it in the
+con-decls. BUT the con-decls will be finished and frozen, and
+are not amenable to subsequent substitution, so it makes sense
+to have the *final* kind-variable (a KindVar, not a TcKindVar) in
+scope. So at least during kind unification we can encounter a
+KindVar.
+
+Hence the isTcTyVar tests before calling lookupTcTyVar.
+
+
\begin{code}
matchExpectedFunKind :: TcKind -> TcM (Maybe (TcKind, TcKind))
-- Like unifyFunTy, but does not fail; instead just returns Nothing
@@ -1117,37 +1144,66 @@ unifyKindX (TyConApp kc1 []) (TyConApp kc2 [])
unifyKindX k1 k2 = unifyKindEq k1 k2
-- In all other cases, let unifyKindEq do the work
+-------------------
uKVar :: SwapFlag -> (TcKind -> TcKind -> TcM (Maybe Ordering))
-> MetaKindVar -> TcKind -> TcM (Maybe Ordering)
uKVar swapped unify_kind kv1 k2
- | isTcTyVar kv1, isMetaTyVar kv1 -- See Note [Unifying kind variables]
- = do { mb_k1 <- readMetaTyVar kv1
- ; case mb_k1 of
- Flexi -> uUnboundKVar kv1 k2
- Indirect k1 -> unSwap swapped unify_kind k1 k2 }
- | TyVarTy kv2 <- k2, kv1 == kv2
+ | isTcTyVar kv1
+ = do { lookup_res <- lookupTcTyVar kv1 -- See Note [Kind variables can be untouchable]
+ ; case lookup_res of
+ Filled k1 -> unSwap swapped unify_kind k1 k2
+ Unfilled ds1 -> uUnfilledKVar kv1 ds1 k2 }
+
+ | otherwise -- See Note [Unifying kind variables]
+ = uUnfilledKVar kv1 vanillaSkolemTv k2
+
+-------------------
+uUnfilledKVar :: MetaKindVar -> TcTyVarDetails -> TcKind -> TcM (Maybe Ordering)
+uUnfilledKVar kv1 ds1 (TyVarTy kv2)
+ | kv1 == kv2
= return (Just EQ)
- | TyVarTy kv2 <- k2, isTcTyVar kv2, isMetaTyVar kv2
- = uKVar (flipSwap swapped) unify_kind kv2 (TyVarTy kv1)
+ | isTcTyVar kv2
+ = do { lookup_res <- lookupTcTyVar kv2
+ ; case lookup_res of
+ Filled k2 -> uUnfilledKVar kv1 ds1 k2
+ Unfilled ds2 -> uUnfilledKVars kv1 ds1 kv2 ds2 }
- | otherwise
- = return Nothing
+ | otherwise -- See Note [Unifying kind variables]
+ = uUnfilledKVars kv1 ds1 kv2 vanillaSkolemTv
-{- Note [Unifying kind variables]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Rather hackily, kind variables can be TyVars not just TcTyVars.
-Main reason is in
- data instance T (D (x :: k)) = ...con-decls...
-Here we bring into scope a kind variable 'k', and use it in the
-con-decls. BUT the con-decls will be finished and frozen, and
-are not amenable to subsequent substitution, so it makes sense
-to have the *final* kind-variable (a KindVar, not a TcKindVar) in
-scope. So at least during kind unification we can encounter a
-KindVar.
-
-Hence the isTcTyVar tests before using isMetaTyVar.
--}
+uUnfilledKVar kv1 ds1 non_var_k2
+ = case ds1 of
+ MetaTv { mtv_info = SigTv }
+ -> return Nothing
+ MetaTv { mtv_ref = ref1 }
+ -> do { k2a <- zonkTcKind non_var_k2
+ ; let k2b = defaultKind k2a
+ -- MetaKindVars must be bound only to simple kinds
+
+ ; dflags <- getDynFlags
+ ; case occurCheckExpand dflags kv1 k2b of
+ OC_OK k2c -> do { writeMetaTyVarRef kv1 ref1 k2c; return (Just EQ) }
+ _ -> return Nothing }
+ _ -> return Nothing
+
+-------------------
+uUnfilledKVars :: MetaKindVar -> TcTyVarDetails
+ -> MetaKindVar -> TcTyVarDetails
+ -> TcM (Maybe Ordering)
+-- kv1 /= kv2
+uUnfilledKVars kv1 ds1 kv2 ds2
+ = case (ds1, ds2) of
+ (MetaTv { mtv_info = i1, mtv_ref = r1 },
+ MetaTv { mtv_info = i2, mtv_ref = r2 })
+ | nicer_to_update_tv1 kv1 i1 i2 -> do_update kv1 r1 kv2
+ | otherwise -> do_update kv2 r2 kv1
+ (MetaTv { mtv_ref = r1 }, _) -> do_update kv1 r1 kv2
+ (_, MetaTv { mtv_ref = r2 }) -> do_update kv2 r2 kv1
+ _ -> return Nothing
+ where
+ do_update kv1 r1 kv2
+ = do { writeMetaTyVarRef kv1 r1 (mkTyVarTy kv2); return (Just EQ) }
---------------------------
unifyKindEq :: TcKind -> TcKind -> TcM (Maybe Ordering)
@@ -1159,41 +1215,16 @@ unifyKindEq k1 (TyVarTy kv2) = uKVar IsSwapped unifyKindEq kv2 k1
unifyKindEq (FunTy a1 r1) (FunTy a2 r2)
= do { mb1 <- unifyKindEq a1 a2; mb2 <- unifyKindEq r1 r2
; return (if isJust mb1 && isJust mb2 then Just EQ else Nothing) }
-
+
unifyKindEq (TyConApp kc1 k1s) (TyConApp kc2 k2s)
| kc1 == kc2
= ASSERT(length k1s == length k2s)
- -- Should succeed since the kind constructors are the same,
+ -- Should succeed since the kind constructors are the same,
-- and the kinds are sort-checked, thus fully applied
do { mb_eqs <- zipWithM unifyKindEq k1s k2s
- ; return (if all isJust mb_eqs
- then Just EQ
+ ; return (if all isJust mb_eqs
+ then Just EQ
else Nothing) }
unifyKindEq _ _ = return Nothing
-
-----------------
-uUnboundKVar :: MetaKindVar -> TcKind -> TcM (Maybe Ordering)
-uUnboundKVar kv1 k2@(TyVarTy kv2)
- | kv1 == kv2 = return (Just EQ)
- | isTcTyVar kv2, isMetaTyVar kv2 -- Distinct kind variables
- = do { mb_k2 <- readMetaTyVar kv2
- ; case mb_k2 of
- Indirect k2 -> uUnboundKVar kv1 k2
- Flexi -> do { writeMetaTyVar kv1 k2; return (Just EQ) } }
- | otherwise
- = do { writeMetaTyVar kv1 k2; return (Just EQ) }
-
-uUnboundKVar kv1 non_var_k2
- | isSigTyVar kv1
- = return Nothing
- | otherwise
- = do { k2a <- zonkTcKind non_var_k2
- ; let k2b = defaultKind k2a
- -- MetaKindVars must be bound only to simple kinds
-
- ; dflags <- getDynFlags
- ; case occurCheckExpand dflags kv1 k2b of
- OC_OK k2c -> do { writeMetaTyVar kv1 k2c; return (Just EQ) }
- _ -> return Nothing }
\end{code}
diff --git a/compiler/typecheck/TcValidity.lhs b/compiler/typecheck/TcValidity.lhs
index 20547bc51e04..f8357825a78e 100644
--- a/compiler/typecheck/TcValidity.lhs
+++ b/compiler/typecheck/TcValidity.lhs
@@ -4,6 +4,8 @@
%
\begin{code}
+{-# LANGUAGE CPP #-}
+
module TcValidity (
Rank, UserTypeCtxt(..), checkValidType, checkValidMonoType,
expectedKindInCtxt,
@@ -38,17 +40,15 @@ import Name
import VarEnv
import VarSet
import ErrUtils
-import PrelNames
import DynFlags
import Util
-import Maybes
import ListSetOps
import SrcLoc
import Outputable
import FastString
-import BasicTypes ( Arity )
import Control.Monad
+import Data.Maybe
import Data.List ( (\\) )
\end{code}
@@ -68,13 +68,21 @@ checkAmbiguity ctxt ty
-- Then :k T should work in GHCi, not complain that
-- (T k) is ambiguous!
+ | InfSigCtxt {} <- ctxt -- See Note [Validity of inferred types] in TcBinds
+ = return ()
+
| otherwise
= do { traceTc "Ambiguity check for" (ppr ty)
- ; (subst, _tvs) <- tcInstSkolTyVars (varSetElems (tyVarsOfType ty))
+ ; let free_tkvs = varSetElemsKvsFirst (closeOverKinds (tyVarsOfType ty))
+ ; (subst, _tvs) <- tcInstSkolTyVars free_tkvs
; let ty' = substTy subst ty
- -- The type might have free TyVars,
- -- so we skolemise them as TcTyVars
+ -- The type might have free TyVars, esp when the ambiguity check
+ -- happens during a call to checkValidType,
+ -- so we skolemise them as TcTyVars.
-- Tiresome; but the type inference engine expects TcTyVars
+ -- NB: The free tyvar might be (a::k), so k is also free
+ -- and we must skolemise it as well. Hence closeOverKinds.
+ -- (Trac #9222)
-- Solve the constraints eagerly because an ambiguous type
-- can cause a cascade of further errors. Since the free
@@ -286,7 +294,7 @@ check_type ctxt rank (AppTy ty1 ty2)
; check_arg_type ctxt rank ty2 }
check_type ctxt rank ty@(TyConApp tc tys)
- | isSynTyCon tc = check_syn_tc_app ctxt rank ty tc tys
+ | isTypeSynonymTyCon tc = check_syn_tc_app ctxt rank ty tc tys
| isUnboxedTupleTyCon tc = check_ubx_tuple ctxt ty tys
| otherwise = mapM_ (check_arg_type ctxt rank) tys
@@ -436,9 +444,21 @@ If we do both, we get exponential behaviour!!
%* *
%************************************************************************
+Note [Implicit parameters in instance decls]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Implicit parameters _only_ allowed in type signatures; not in instance
+decls, superclasses etc. The reason for not allowing implicit params in
+instances is a bit subtle. If we allowed
+ instance (?x::Int, Eq a) => Foo [a] where ...
+then when we saw
+ (e :: (?x::Int) => t)
+it would be unclear how to discharge all the potential uses of the ?x
+in e. For example, a constraint Foo [Int] might come out of e, and
+applying the instance decl would show up two uses of ?x. Trac #8912.
+
\begin{code}
checkValidTheta :: UserTypeCtxt -> ThetaType -> TcM ()
-checkValidTheta ctxt theta
+checkValidTheta ctxt theta
= addErrCtxt (checkThetaCtxt ctxt theta) (check_valid_theta ctxt theta)
-------------------------
@@ -460,36 +480,21 @@ check_pred_ty :: DynFlags -> UserTypeCtxt -> PredType -> TcM ()
-- type synonyms have been checked at their definition site
check_pred_ty dflags ctxt pred
- | Just (tc,tys) <- tcSplitTyConApp_maybe pred
- = case () of
- _ | Just cls <- tyConClass_maybe tc
- -> check_class_pred dflags ctxt cls tys
-
- | tc `hasKey` eqTyConKey
- , let [_, ty1, ty2] = tys
- -> check_eq_pred dflags ctxt ty1 ty2
-
- | isTupleTyCon tc
- -> check_tuple_pred dflags ctxt pred tys
-
- | otherwise -- X t1 t2, where X is presumably a
- -- type/data family returning ConstraintKind
- -> check_irred_pred dflags ctxt pred tys
-
- | (TyVarTy _, arg_tys) <- tcSplitAppTys pred
- = check_irred_pred dflags ctxt pred arg_tys
-
- | otherwise
- = badPred pred
+ = case classifyPredType pred of
+ ClassPred cls tys -> check_class_pred dflags ctxt pred cls tys
+ EqPred ty1 ty2 -> check_eq_pred dflags ctxt pred ty1 ty2
+ TuplePred tys -> check_tuple_pred dflags ctxt pred tys
+ IrredPred _ -> check_irred_pred dflags ctxt pred
-badPred :: PredType -> TcM ()
-badPred pred = failWithTc (ptext (sLit "Malformed predicate") <+> quotes (ppr pred))
-check_class_pred :: DynFlags -> UserTypeCtxt -> Class -> [TcType] -> TcM ()
-check_class_pred dflags ctxt cls tys
+check_class_pred :: DynFlags -> UserTypeCtxt -> PredType -> Class -> [TcType] -> TcM ()
+check_class_pred dflags ctxt pred cls tys
= do { -- Class predicates are valid in all contexts
; checkTc (arity == n_tys) arity_err
+ ; checkTc (not (isIPClass cls) || okIPCtxt ctxt)
+ (badIPPred pred)
+
-- Check the form of the argument types
; mapM_ checkValidMonoType tys
; checkTc (check_class_pred_tys dflags ctxt tys)
@@ -502,13 +507,23 @@ check_class_pred dflags ctxt cls tys
arity_err = arityErr "Class" class_name arity n_tys
how_to_allow = parens (ptext (sLit "Use FlexibleContexts to permit this"))
+okIPCtxt :: UserTypeCtxt -> Bool
+ -- See Note [Implicit parameters in instance decls]
+okIPCtxt (ClassSCCtxt {}) = False
+okIPCtxt (InstDeclCtxt {}) = False
+okIPCtxt (SpecInstCtxt {}) = False
+okIPCtxt _ = True
+
+badIPPred :: PredType -> SDoc
+badIPPred pred = ptext (sLit "Illegal implicit parameter") <+> quotes (ppr pred)
+
-check_eq_pred :: DynFlags -> UserTypeCtxt -> TcType -> TcType -> TcM ()
-check_eq_pred dflags _ctxt ty1 ty2
+check_eq_pred :: DynFlags -> UserTypeCtxt -> PredType -> TcType -> TcType -> TcM ()
+check_eq_pred dflags _ctxt pred ty1 ty2
= do { -- Equational constraints are valid in all contexts if type
-- families are permitted
; checkTc (xopt Opt_TypeFamilies dflags || xopt Opt_GADTs dflags)
- (eqPredTyErr (mkEqPred ty1 ty2))
+ (eqPredTyErr pred)
-- Check the form of the argument types
; checkValidMonoType ty1
@@ -523,8 +538,8 @@ check_tuple_pred dflags ctxt pred ts
-- This case will not normally be executed because
-- without -XConstraintKinds tuple types are only kind-checked as *
-check_irred_pred :: DynFlags -> UserTypeCtxt -> PredType -> [TcType] -> TcM ()
-check_irred_pred dflags ctxt pred arg_tys
+check_irred_pred :: DynFlags -> UserTypeCtxt -> PredType -> TcM ()
+check_irred_pred dflags ctxt pred
-- The predicate looks like (X t1 t2) or (x t1 t2) :: Constraint
-- But X is not a synonym; that's been expanded already
--
@@ -541,9 +556,9 @@ check_irred_pred dflags ctxt pred arg_tys
--
-- It is equally dangerous to allow them in instance heads because in that case the
-- Paterson conditions may not detect duplication of a type variable or size change.
- = do { checkTc (xopt Opt_ConstraintKinds dflags)
+ = do { checkValidMonoType pred
+ ; checkTc (xopt Opt_ConstraintKinds dflags)
(predIrredErr pred)
- ; mapM_ checkValidMonoType arg_tys
; unless (xopt Opt_UndecidableInstances dflags) $
-- Make sure it is OK to have an irred pred in this context
checkTc (case ctxt of ClassSCCtxt _ -> False; InstDeclCtxt -> False; _ -> True)
@@ -644,7 +659,7 @@ unambiguous. See Note [Impedence matching] in TcBinds.
This test is very conveniently implemented by calling
tcSubType
This neatly takes account of the functional dependecy stuff above,
-and implict parameter (see Note [Implicit parameters and ambiguity]).
+and implicit parameter (see Note [Implicit parameters and ambiguity]).
What about this, though?
g :: C [a] => Int
@@ -759,11 +774,10 @@ checkValidInstHead ctxt clas cls_args
; checkTc (xopt Opt_FlexibleInstances dflags ||
all tcInstHeadTyAppAllTyVars ty_args)
(instTypeErr clas cls_args head_type_args_tyvars_msg)
- ; checkTc (xopt Opt_NullaryTypeClasses dflags ||
- not (null ty_args))
- (instTypeErr clas cls_args head_no_type_msg)
; checkTc (xopt Opt_MultiParamTypeClasses dflags ||
- length ty_args <= 1) -- Only count type arguments
+ length ty_args == 1 || -- Only count type arguments
+ (xopt Opt_NullaryTypeClasses dflags &&
+ null ty_args))
(instTypeErr clas cls_args head_one_type_msg) }
-- May not contain type family applications
@@ -793,11 +807,7 @@ checkValidInstHead ctxt clas cls_args
head_one_type_msg = parens (
text "Only one type can be given in an instance head." $$
- text "Use MultiParamTypeClasses if you want to allow more.")
-
- head_no_type_msg = parens (
- text "No parameters in the instance head." $$
- text "Use NullaryTypeClasses if you want to allow this.")
+ text "Use MultiParamTypeClasses if you want to allow more, or zero.")
abstract_class_msg =
text "The class is abstract, manual instances are not permitted."
@@ -869,8 +879,8 @@ checkValidInstance ctxt hs_type ty
else checkInstTermination inst_tys theta
; case (checkInstCoverage undecidable_ok clas theta inst_tys) of
- Nothing -> return () -- Check succeeded
- Just msg -> addErrTc (instTypeErr clas inst_tys msg)
+ IsValid -> return () -- Check succeeded
+ NotValid msg -> addErrTc (instTypeErr clas inst_tys msg)
; return (tvs, theta, clas, inst_tys) }
@@ -1104,7 +1114,14 @@ checkValidTyFamInst mb_clsinfo fam_tc
= setSrcSpan loc $
do { checkValidFamPats fam_tc tvs typats
- -- The right-hand side is a tau type
+ -- The argument patterns, and RHS, are all boxed tau types
+ -- E.g Reject type family F (a :: k1) :: k2
+ -- type instance F (forall a. a->a) = ...
+ -- type instance F Int# = ...
+ -- type instance F Int = forall a. a->a
+ -- type instance F Int = Int#
+ -- See Trac #9357
+ ; mapM_ checkValidMonoType typats
; checkValidMonoType rhs
-- We have a decidable instance unless otherwise permitted
@@ -1124,7 +1141,7 @@ checkFamInstRhs :: [Type] -- lhs
-> [(TyCon, [Type])] -- type family instances
-> [MsgDoc]
checkFamInstRhs lhsTys famInsts
- = mapCatMaybes check famInsts
+ = mapMaybe check famInsts
where
size = sizeTypes lhsTys
fvs = fvTypes lhsTys
@@ -1154,26 +1171,18 @@ checkValidFamPats :: TyCon -> [TyVar] -> [Type] -> TcM ()
-- type instance F (T a) = a
-- c) Have the right number of patterns
checkValidFamPats fam_tc tvs ty_pats
- = do { -- A family instance must have exactly the same number of type
- -- parameters as the family declaration. You can't write
- -- type family F a :: * -> *
- -- type instance F Int y = y
- -- because then the type (F Int) would be like (\y.y)
- checkTc (length ty_pats == fam_arity) $
- wrongNumberOfParmsErr (fam_arity - length fam_kvs) -- report only types
- ; mapM_ checkTyFamFreeness ty_pats
+ = ASSERT( length ty_pats == tyConArity fam_tc )
+ -- A family instance must have exactly the same number of type
+ -- parameters as the family declaration. You can't write
+ -- type family F a :: * -> *
+ -- type instance F Int y = y
+ -- because then the type (F Int) would be like (\y.y)
+ -- But this is checked at the time the axiom is created
+ do { mapM_ checkTyFamFreeness ty_pats
; let unbound_tvs = filterOut (`elemVarSet` exactTyVarsOfTypes ty_pats) tvs
; checkTc (null unbound_tvs) (famPatErr fam_tc unbound_tvs ty_pats) }
- where fam_arity = tyConArity fam_tc
- (fam_kvs, _) = splitForAllTys (tyConKind fam_tc)
-
-wrongNumberOfParmsErr :: Arity -> SDoc
-wrongNumberOfParmsErr exp_arity
- = ptext (sLit "Number of parameters must match family declaration; expected")
- <+> ppr exp_arity
-- Ensure that no type family instances occur in a type.
---
checkTyFamFreeness :: Type -> TcM ()
checkTyFamFreeness ty
= checkTc (isTyFamFree ty) $
diff --git a/compiler/types/Class.lhs b/compiler/types/Class.lhs
index 2d145683bf1b..9863b8d98f57 100644
--- a/compiler/types/Class.lhs
+++ b/compiler/types/Class.lhs
@@ -6,7 +6,8 @@
The @Class@ datatype
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -16,7 +17,7 @@ The @Class@ datatype
module Class (
Class,
ClassOpItem, DefMeth (..),
- ClassATItem,
+ ClassATItem(..),
ClassMinimalDef,
defMethSpecOfDefMeth,
@@ -31,8 +32,7 @@ module Class (
#include "HsVersions.h"
import {-# SOURCE #-} TyCon ( TyCon, tyConName, tyConUnique )
-import {-# SOURCE #-} TypeRep ( PredType )
-import CoAxiom
+import {-# SOURCE #-} TypeRep ( Type, PredType )
import Var
import Name
import BasicTypes
@@ -99,10 +99,10 @@ data DefMeth = NoDefMeth -- No default method
| GenDefMeth Name -- A generic default method
deriving Eq
-type ClassATItem = (TyCon, -- See Note [Associated type tyvar names]
- [CoAxBranch]) -- Default associated types from these templates
- -- We can have more than one default per type; see
- -- Note [Associated type defaults] in TcTyClsDecls
+data ClassATItem
+ = ATI TyCon -- See Note [Associated type tyvar names]
+ (Maybe Type) -- Default associated type (if any) from this template
+ -- Note [Associated type defaults]
type ClassMinimalDef = BooleanFormula Name -- Required methods
@@ -114,9 +114,39 @@ defMethSpecOfDefMeth meth
NoDefMeth -> NoDM
DefMeth _ -> VanillaDM
GenDefMeth _ -> GenericDM
-
\end{code}
+Note [Associated type defaults]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The following is an example of associated type defaults:
+ class C a where
+ data D a r
+
+ type F x a b :: *
+ type F p q r = (p,q)->r -- Default
+
+Note that
+
+ * The TyCons for the associated types *share type variables* with the
+ class, so that we can tell which argument positions should be
+ instantiated in an instance decl. (The first for 'D', the second
+ for 'F'.)
+
+ * We can have default definitions only for *type* families,
+ not data families
+
+ * In the default decl, the "patterns" should all be type variables,
+ but (in the source language) they don't need to be the same as in
+ the 'type' decl signature or the class. It's more like a
+ free-standing 'type instance' declaration.
+
+ * HOWEVER, in the internal ClassATItem we rename the RHS to match the
+ tyConTyVars of the family TyCon. So in the example above we'd get
+ a ClassATItem of
+ ATI F ((x,a) -> b)
+ So the tyConTyVars of the family TyCon bind the free vars of
+ the default Type rhs
+
The @mkClass@ function fills in the indirect superclasses.
\begin{code}
@@ -197,7 +227,7 @@ classOpItems = classOpStuff
classATs :: Class -> [TyCon]
classATs (Class { classATStuff = at_stuff })
- = [tc | (tc, _) <- at_stuff]
+ = [tc | ATI tc _ <- at_stuff]
classATItems :: Class -> [ClassATItem]
classATItems = classATStuff
diff --git a/compiler/types/CoAxiom.lhs b/compiler/types/CoAxiom.lhs
index a0a497483122..06b74a43f013 100644
--- a/compiler/types/CoAxiom.lhs
+++ b/compiler/types/CoAxiom.lhs
@@ -4,7 +4,7 @@
\begin{code}
-{-# LANGUAGE GADTs, ScopedTypeVariables #-}
+{-# LANGUAGE CPP, DeriveDataTypeable, GADTs, ScopedTypeVariables #-}
-- | Module for coercion axioms, used to represent type family instances
-- and newtypes
@@ -327,6 +327,7 @@ isImplicitCoAxiom = co_ax_implicit
coAxBranchIncomps :: CoAxBranch -> [CoAxBranch]
coAxBranchIncomps = cab_incomps
+-- See Note [Compatibility checking] in FamInstEnv
placeHolderIncomps :: [CoAxBranch]
placeHolderIncomps = panic "placeHolderIncomps"
diff --git a/compiler/types/Coercion.lhs b/compiler/types/Coercion.lhs
index af2b2fa4838a..38f38ed50b12 100644
--- a/compiler/types/Coercion.lhs
+++ b/compiler/types/Coercion.lhs
@@ -3,6 +3,8 @@
%
\begin{code}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
+
-- | Module for (a) type kinds and (b) type coercions,
-- as used in System FC. See 'CoreSyn.Expr' for
-- more on System FC and how coercions fit into it.
@@ -16,7 +18,7 @@ module Coercion (
-- ** Functions over coercions
coVarKind, coVarRole,
coercionType, coercionKind, coercionKinds, isReflCo,
- isReflCo_maybe, coercionRole,
+ isReflCo_maybe, coercionRole, coercionKindRole,
mkCoercionType,
-- ** Constructing coercions
@@ -27,7 +29,7 @@ module Coercion (
mkSymCo, mkTransCo, mkNthCo, mkNthCoRole, mkLRCo,
mkInstCo, mkAppCo, mkAppCoFlexible, mkTyConAppCo, mkFunCo,
mkForAllCo, mkUnsafeCo, mkUnivCo, mkSubCo, mkPhantomCo,
- mkNewTypeCo, maybeSubCo, maybeSubCo2,
+ mkNewTypeCo, downgradeRole,
mkAxiomRuleCo,
-- ** Decomposition
@@ -38,7 +40,7 @@ module Coercion (
splitAppCo_maybe,
splitForAllCo_maybe,
nthRole, tyConRolesX,
- nextRole,
+ nextRole, setNominalRole_maybe,
-- ** Coercion variables
mkCoVar, isCoVar, isCoVarType, coVarName, setCoVarName, setCoVarUnique,
@@ -102,8 +104,10 @@ import PrelNames ( funTyConKey, eqPrimTyConKey, eqReprPrimTyConKey )
import Control.Applicative
import Data.Traversable (traverse, sequenceA)
import FastString
+import ListSetOps
import qualified Data.Data as Data hiding ( TyCon )
+import Control.Arrow ( first )
\end{code}
%************************************************************************
@@ -632,7 +636,7 @@ pprCo, pprParendCo :: Coercion -> SDoc
pprCo co = ppr_co TopPrec co
pprParendCo co = ppr_co TyConPrec co
-ppr_co :: Prec -> Coercion -> SDoc
+ppr_co :: TyPrec -> Coercion -> SDoc
ppr_co _ (Refl r ty) = angleBrackets (ppr ty) <> ppr_role r
ppr_co p co@(TyConAppCo _ tc [_,_])
@@ -695,7 +699,7 @@ instance Outputable LeftOrRight where
ppr CLeft = ptext (sLit "Left")
ppr CRight = ptext (sLit "Right")
-ppr_fun_co :: Prec -> Coercion -> SDoc
+ppr_fun_co :: TyPrec -> Coercion -> SDoc
ppr_fun_co p co = pprArrowChain p (split co)
where
split :: Coercion -> [SDoc]
@@ -704,7 +708,7 @@ ppr_fun_co p co = pprArrowChain p (split co)
= ppr_co FunPrec arg : split res
split co = [ppr_co TopPrec co]
-ppr_forall_co :: Prec -> Coercion -> SDoc
+ppr_forall_co :: TyPrec -> Coercion -> SDoc
ppr_forall_co p ty
= maybeParen p FunPrec $
sep [pprForAll tvs, ppr_co TopPrec rho]
@@ -724,7 +728,7 @@ pprCoAxBranch :: TyCon -> CoAxBranch -> SDoc
pprCoAxBranch fam_tc (CoAxBranch { cab_tvs = tvs
, cab_lhs = lhs
, cab_rhs = rhs })
- = hang (ifPprDebug (pprForAll tvs))
+ = hang (pprUserForAll tvs)
2 (hang (pprTypeApp fam_tc lhs) 2 (equals <+> (ppr rhs)))
pprCoAxBranchHdr :: CoAxiom br -> BranchIndex -> SDoc
@@ -770,7 +774,7 @@ splitAppCo_maybe (AppCo co1 co2) = Just (co1, co2)
splitAppCo_maybe (TyConAppCo r tc cos)
| isDecomposableTyCon tc || cos `lengthExceeds` tyConArity tc
, Just (cos', co') <- snocView cos
- , Just co'' <- unSubCo_maybe co'
+ , Just co'' <- setNominalRole_maybe co'
= Just (mkTyConAppCo r tc cos', co'') -- Never create unsaturated type family apps!
-- Use mkTyConAppCo to preserve the invariant
-- that identity coercions are always represented by Refl
@@ -829,6 +833,55 @@ isReflCo_maybe _ = Nothing
%* *
%************************************************************************
+Note [Role twiddling functions]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are a plethora of functions for twiddling roles:
+
+mkSubCo: Requires a nominal input coercion and always produces a
+representational output. This is used when you (the programmer) are sure you
+know exactly what role you have and what you want.
+
+setRole_maybe: This function takes both the input role and the output role
+as parameters. (The *output* role comes first!) It can only *downgrade* a
+role -- that is, change it from N to R or P, or from R to P. This one-way
+behavior is why there is the "_maybe". If an upgrade is requested, this
+function produces Nothing. This is used when you need to change the role of a
+coercion, but you're not sure (as you're writing the code) of which roles are
+involved.
+
+This function could have been written using coercionRole to ascertain the role
+of the input. But, that function is recursive, and the caller of setRole_maybe
+often knows the input role. So, this is more efficient.
+
+downgradeRole: This is just like setRole_maybe, but it panics if the conversion
+isn't a downgrade.
+
+setNominalRole_maybe: This is the only function that can *upgrade* a coercion. The result
+(if it exists) is always Nominal. The input can be at any role. It works on a
+"best effort" basis, as it should never be strictly necessary to upgrade a coercion
+during compilation. It is currently only used within GHC in splitAppCo_maybe. In order
+to be a proper inverse of mkAppCo, the second coercion that splitAppCo_maybe returns
+must be nominal. But, it's conceivable that splitAppCo_maybe is operating over a
+TyConAppCo that uses a representational coercion. Hence the need for setNominalRole_maybe.
+splitAppCo_maybe, in turn, is used only within coercion optimization -- thus, it is
+not absolutely critical that setNominalRole_maybe be complete.
+
+Note that setNominalRole_maybe will never upgrade a phantom UnivCo. Phantom
+UnivCos are perfectly type-safe, whereas representational and nominal ones are
+not. Indeed, `unsafeCoerce` is implemented via a representational UnivCo.
+(Nominal ones are no worse than representational ones, so this function *will*
+change a UnivCo Representational to a UnivCo Nominal.)
+
+Conal Elliott also came across a need for this function while working with the GHC
+API, as he was decomposing Core casts. The Core casts use representational coercions,
+as they must, but his use case required nominal coercions (he was building a GADT).
+So, that's why this function is exported from this module.
+
+One might ask: shouldn't setRole_maybe just use setNominalRole_maybe as appropriate?
+I (Richard E.) have decided not to do this, because upgrading a role is bizarre and
+a caller should have to ask for this behavior explicitly.
+
\begin{code}
mkCoVarCo :: CoVar -> Coercion
-- cv :: s ~# t
@@ -845,9 +898,9 @@ mkAxInstCo :: Role -> CoAxiom br -> BranchIndex -> [Type] -> Coercion
+-- mkAxInstCo can legitimately be called over-saturated;
-- i.e. with more type arguments than the coercion requires
mkAxInstCo role ax index tys
- | arity == n_tys = maybeSubCo2 role ax_role $ AxiomInstCo ax_br index rtys
+ | arity == n_tys = downgradeRole role ax_role $ AxiomInstCo ax_br index rtys
| otherwise = ASSERT( arity < n_tys )
- maybeSubCo2 role ax_role $
+ downgradeRole role ax_role $
foldl AppCo (AxiomInstCo ax_br index (take arity rtys))
(drop arity rtys)
where
@@ -899,10 +952,12 @@ mkAppCo co1 co2 = mkAppCoFlexible co1 Nominal co2
mkAppCoFlexible :: Coercion -> Role -> Coercion -> Coercion
mkAppCoFlexible (Refl r ty1) _ (Refl _ ty2)
= Refl r (mkAppTy ty1 ty2)
-mkAppCoFlexible (Refl r (TyConApp tc tys)) r2 co2
+mkAppCoFlexible (Refl r ty1) r2 co2
+ | Just (tc, tys) <- splitTyConApp_maybe ty1
+ -- Expand type synonyms; a TyConAppCo can't have a type synonym (Trac #9102)
= TyConAppCo r tc (zip_roles (tyConRolesX r tc) tys)
where
- zip_roles (r1:_) [] = [maybeSubCo2 r1 r2 co2]
+ zip_roles (r1:_) [] = [downgradeRole r1 r2 co2]
zip_roles (r1:rs) (ty1:tys) = mkReflCo r1 ty1 : zip_roles rs tys
zip_roles _ _ = panic "zip_roles" -- but the roles are infinite...
mkAppCoFlexible (TyConAppCo r tc cos) r2 co
@@ -911,7 +966,7 @@ mkAppCoFlexible (TyConAppCo r tc cos) r2 co
TyConAppCo Nominal tc (cos ++ [co])
Representational -> TyConAppCo Representational tc (cos ++ [co'])
where new_role = (tyConRolesX Representational tc) !! (length cos)
- co' = maybeSubCo2 new_role r2 co
+ co' = downgradeRole new_role r2 co
Phantom -> TyConAppCo Phantom tc (cos ++ [mkPhantomCo co])
mkAppCoFlexible co1 _r2 co2 = ASSERT( _r2 == Nominal )
@@ -970,7 +1025,7 @@ mkTransCo co1 co2 = TransCo co1 co2
-- sure this request is reasonable
mkNthCoRole :: Role -> Int -> Coercion -> Coercion
mkNthCoRole role n co
- = maybeSubCo2 role nth_role $ nth_co
+ = downgradeRole role nth_role $ nth_co
where
nth_co = mkNthCo n co
nth_role = coercionRole nth_co
@@ -999,10 +1054,9 @@ ok_tc_app ty n = case splitTyConApp_maybe ty of
mkInstCo :: Coercion -> Type -> Coercion
mkInstCo co ty = InstCo co ty
--- | Manufacture a coercion from thin air. Needless to say, this is
--- not usually safe, but it is used when we know we are dealing with
--- bottom, which is one case in which it is safe. This is also used
--- to implement the @unsafeCoerce#@ primitive. Optimise by pushing
+-- | Manufacture an unsafe coercion from thin air.
+-- Currently (May 14) this is used only to implement the
+-- @unsafeCoerce#@ primitive. Optimise by pushing
-- down through type constructors.
mkUnsafeCo :: Type -> Type -> Coercion
mkUnsafeCo = mkUnivCo Representational
@@ -1015,7 +1069,7 @@ mkUnivCo role ty1 ty2
mkAxiomRuleCo :: CoAxiomRule -> [Type] -> [Coercion] -> Coercion
mkAxiomRuleCo = AxiomRuleCo
--- input coercion is Nominal
+-- input coercion is Nominal; see also Note [Role twiddling functions]
mkSubCo :: Coercion -> Coercion
mkSubCo (Refl Nominal ty) = Refl Representational ty
mkSubCo (TyConAppCo Nominal tc cos)
@@ -1024,44 +1078,51 @@ mkSubCo (UnivCo Nominal ty1 ty2) = UnivCo Representational ty1 ty2
mkSubCo co = ASSERT2( coercionRole co == Nominal, ppr co <+> ppr (coercionRole co) )
SubCo co
-
--- takes a Nominal coercion and possibly casts it into a Representational one
-maybeSubCo :: Role -> Coercion -> Coercion
-maybeSubCo Nominal = id
-maybeSubCo Representational = mkSubCo
-maybeSubCo Phantom = pprPanic "maybeSubCo Phantom" . ppr
-
-maybeSubCo2_maybe :: Role -- desired role
- -> Role -- current role
- -> Coercion -> Maybe Coercion
-maybeSubCo2_maybe Representational Nominal = Just . mkSubCo
-maybeSubCo2_maybe Nominal Representational = const Nothing
-maybeSubCo2_maybe Phantom Phantom = Just
-maybeSubCo2_maybe Phantom _ = Just . mkPhantomCo
-maybeSubCo2_maybe _ Phantom = const Nothing
-maybeSubCo2_maybe _ _ = Just
-
-maybeSubCo2 :: Role -- desired role
- -> Role -- current role
- -> Coercion -> Coercion
-maybeSubCo2 r1 r2 co
- = case maybeSubCo2_maybe r1 r2 co of
+-- only *downgrades* a role. See Note [Role twiddling functions]
+setRole_maybe :: Role -- desired role
+ -> Role -- current role
+ -> Coercion -> Maybe Coercion
+setRole_maybe Representational Nominal = Just . mkSubCo
+setRole_maybe Nominal Representational = const Nothing
+setRole_maybe Phantom Phantom = Just
+setRole_maybe Phantom _ = Just . mkPhantomCo
+setRole_maybe _ Phantom = const Nothing
+setRole_maybe _ _ = Just
+
+-- panics if the requested conversion is not a downgrade.
+-- See also Note [Role twiddling functions]
+downgradeRole :: Role -- desired role
+ -> Role -- current role
+ -> Coercion -> Coercion
+downgradeRole r1 r2 co
+ = case setRole_maybe r1 r2 co of
Just co' -> co'
- Nothing -> pprPanic "maybeSubCo2" (ppr co)
-
--- if co is Nominal, returns it; otherwise, unwraps a SubCo; otherwise, fails
-unSubCo_maybe :: Coercion -> Maybe Coercion
-unSubCo_maybe (SubCo co) = Just co
-unSubCo_maybe (Refl _ ty) = Just $ Refl Nominal ty
-unSubCo_maybe (TyConAppCo Representational tc cos)
- = do { cos' <- mapM unSubCo_maybe cos
+ Nothing -> pprPanic "downgradeRole" (ppr co)
+
+-- Converts a coercion to be nominal, if possible.
+-- See also Note [Role twiddling functions]
+setNominalRole_maybe :: Coercion -> Maybe Coercion
+setNominalRole_maybe co
+ | Nominal <- coercionRole co = Just co
+setNominalRole_maybe (SubCo co) = Just co
+setNominalRole_maybe (Refl _ ty) = Just $ Refl Nominal ty
+setNominalRole_maybe (TyConAppCo Representational tc coes)
+ = do { cos' <- mapM setNominalRole_maybe coes
; return $ TyConAppCo Nominal tc cos' }
-unSubCo_maybe (UnivCo Representational ty1 ty2) = Just $ UnivCo Nominal ty1 ty2
+setNominalRole_maybe (UnivCo Representational ty1 ty2) = Just $ UnivCo Nominal ty1 ty2
-- We do *not* promote UnivCo Phantom, as that's unsafe.
-- UnivCo Nominal is no more unsafe than UnivCo Representational
-unSubCo_maybe co
- | Nominal <- coercionRole co = Just co
-unSubCo_maybe _ = Nothing
+setNominalRole_maybe (TransCo co1 co2)
+ = TransCo <$> setNominalRole_maybe co1 <*> setNominalRole_maybe co2
+setNominalRole_maybe (AppCo co1 co2)
+ = AppCo <$> setNominalRole_maybe co1 <*> pure co2
+setNominalRole_maybe (ForAllCo tv co)
+ = ForAllCo tv <$> setNominalRole_maybe co
+setNominalRole_maybe (NthCo n co)
+ = NthCo n <$> setNominalRole_maybe co
+setNominalRole_maybe (InstCo co ty)
+ = InstCo <$> setNominalRole_maybe co <*> pure ty
+setNominalRole_maybe _ = Nothing
-- takes any coercion and turns it into a Phantom coercion
mkPhantomCo :: Coercion -> Coercion
@@ -1556,7 +1617,7 @@ failing for reason 2) is fine. matchAxiom is trying to find a set of coercions
that match, but it may fail, and this is healthy behavior. Bottom line: if
you find that liftCoSubst is doing weird things (like leaving out-of-scope
variables lying around), disable coercion optimization (bypassing matchAxiom)
-and use maybeSubCo2 instead of maybeSubCo2_maybe. The panic will then happen,
+and use downgradeRole instead of setRole_maybe. The panic will then happen,
and you may learn something useful.
\begin{code}
@@ -1566,7 +1627,7 @@ liftCoSubstTyVar (LCS _ cenv) r tv
= do { co <- lookupVarEnv cenv tv
; let co_role = coercionRole co -- could theoretically take this as
-- a parameter, but painful
- ; maybeSubCo2_maybe r co_role co } -- see Note [liftCoSubstTyVar]
+ ; setRole_maybe r co_role co } -- see Note [liftCoSubstTyVar]
liftCoSubstTyVarBndr :: LiftCoSubst -> TyVar -> (LiftCoSubst, TyVar)
liftCoSubstTyVarBndr subst@(LCS in_scope cenv) old_var
@@ -1733,10 +1794,23 @@ seqCos (co:cos) = seqCo co `seq` seqCos cos
%* *
%************************************************************************
+Note [Computing a coercion kind and role]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+To compute a coercion's kind is straightforward: see coercionKind.
+But to compute a coercion's role, in the case for NthCo we need
+its kind as well. So if we have two separate functions (one for kinds
+and one for roles) we can get exponentially bad behaviour, since each
+NthCo node makes a separate call to coercionKind, which traverses the
+sub-tree again. This was part of the problem in Trac #9233.
+
+Solution: compute both together; hence coercionKindRole. We keep a
+separate coercionKind function because it's a bit more efficient if
+the kind is all you want.
+
\begin{code}
coercionType :: Coercion -> Type
-coercionType co = case coercionKind co of
- Pair ty1 ty2 -> mkCoercionType (coercionRole co) ty1 ty2
+coercionType co = case coercionKindRole co of
+ (Pair ty1 ty2, r) -> mkCoercionType r ty1 ty2
------------------
-- | If it is the case that
@@ -1768,11 +1842,10 @@ coercionKind co = go co
go (InstCo aco ty) = go_app aco [ty]
go (SubCo co) = go co
go (AxiomRuleCo ax tys cos) =
- case coaxrProves ax tys (map coercionKind cos) of
+ case coaxrProves ax tys (map go cos) of
Just res -> res
Nothing -> panic "coercionKind: Malformed coercion"
-
go_app :: Coercion -> [Type] -> Pair Type
-- Collect up all the arguments and apply all at once
-- See Note [Nested InstCos]
@@ -1783,25 +1856,54 @@ coercionKind co = go co
coercionKinds :: [Coercion] -> Pair [Type]
coercionKinds tys = sequenceA $ map coercionKind tys
-coercionRole :: Coercion -> Role
-coercionRole = go
+-- | Get a coercion's kind and role.
+-- Why both at once? See Note [Computing a coercion kind and role]
+coercionKindRole :: Coercion -> (Pair Type, Role)
+coercionKindRole = go
where
- go (Refl r _) = r
- go (TyConAppCo r _ _) = r
- go (AppCo co _) = go co
- go (ForAllCo _ co) = go co
- go (CoVarCo cv) = coVarRole cv
- go (AxiomInstCo ax _ _) = coAxiomRole ax
- go (UnivCo r _ _) = r
- go (SymCo co) = go co
- go (TransCo co1 _) = go co1 -- same as go co2
- go (NthCo n co) = let Pair ty1 _ = coercionKind co
- (tc, _) = splitTyConApp ty1
- in nthRole (coercionRole co) tc n
- go (LRCo _ _) = Nominal
- go (InstCo co _) = go co
- go (SubCo _) = Representational
- go (AxiomRuleCo c _ _) = coaxrRole c
+ go (Refl r ty) = (Pair ty ty, r)
+ go (TyConAppCo r tc cos)
+ = (mkTyConApp tc <$> (sequenceA $ map coercionKind cos), r)
+ go (AppCo co1 co2)
+ = let (tys1, r1) = go co1 in
+ (mkAppTy <$> tys1 <*> coercionKind co2, r1)
+ go (ForAllCo tv co)
+ = let (tys, r) = go co in
+ (mkForAllTy tv <$> tys, r)
+ go (CoVarCo cv) = (toPair $ coVarKind cv, coVarRole cv)
+ go co@(AxiomInstCo ax _ _) = (coercionKind co, coAxiomRole ax)
+ go (UnivCo r ty1 ty2) = (Pair ty1 ty2, r)
+ go (SymCo co) = first swap $ go co
+ go (TransCo co1 co2)
+ = let (tys1, r) = go co1 in
+ (Pair (pFst tys1) (pSnd $ coercionKind co2), r)
+ go (NthCo d co)
+ = let (Pair t1 t2, r) = go co
+ (tc1, args1) = splitTyConApp t1
+ (_tc2, args2) = splitTyConApp t2
+ in
+ ASSERT( tc1 == _tc2 )
+ ((`getNth` d) <$> Pair args1 args2, nthRole r tc1 d)
+ go co@(LRCo {}) = (coercionKind co, Nominal)
+ go (InstCo co ty) = go_app co [ty]
+ go (SubCo co) = (coercionKind co, Representational)
+ go co@(AxiomRuleCo ax _ _) = (coercionKind co, coaxrRole ax)
+
+ go_app :: Coercion -> [Type] -> (Pair Type, Role)
+ -- Collect up all the arguments and apply all at once
+ -- See Note [Nested InstCos]
+ go_app (InstCo co ty) tys = go_app co (ty:tys)
+ go_app co tys
+ = let (pair, r) = go co in
+ ((`applyTys` tys) <$> pair, r)
+
+-- | Retrieve the role from a coercion.
+coercionRole :: Coercion -> Role
+coercionRole = snd . coercionKindRole
+ -- There's not a better way to do this, because NthCo needs the *kind*
+ -- and role of its argument. Luckily, laziness should generally avoid
+ -- the need for computing kinds in other cases.
+
\end{code}
Note [Nested InstCos]
diff --git a/compiler/types/FamInstEnv.lhs b/compiler/types/FamInstEnv.lhs
index c17668bbb570..1308984f4f68 100644
--- a/compiler/types/FamInstEnv.lhs
+++ b/compiler/types/FamInstEnv.lhs
@@ -5,13 +5,12 @@
FamInstEnv: Type checked family instance declarations
\begin{code}
-
-{-# LANGUAGE GADTs #-}
+{-# LANGUAGE CPP, GADTs #-}
module FamInstEnv (
FamInst(..), FamFlavor(..), famInstAxiom, famInstTyCon, famInstRHS,
famInstsRepTyCons, famInstRepTyCon_maybe, dataFamInstRepTyCon,
- pprFamInst, pprFamInstHdr, pprFamInsts,
+ pprFamInst, pprFamInsts,
mkImportedFamInst,
FamInstEnvs, FamInstEnv, emptyFamInstEnv, emptyFamInstEnvs,
@@ -47,7 +46,6 @@ import Coercion
import CoAxiom
import VarSet
import VarEnv
-import Module( isInteractiveModule )
import Name
import UniqFM
import Outputable
@@ -167,12 +165,13 @@ instance Outputable FamInst where
ppr = pprFamInst
-- Prints the FamInst as a family instance declaration
+-- NB: FamInstEnv.pprFamInst is used only for internal, debug printing
+-- See pprTyThing.pprFamInst for printing for the user
pprFamInst :: FamInst -> SDoc
pprFamInst famInst
= hang (pprFamInstHdr famInst)
2 (vcat [ ifPprDebug (ptext (sLit "Coercion axiom:") <+> ppr ax)
- , ifPprDebug (ptext (sLit "RHS:") <+> ppr (famInstRHS famInst))
- , ptext (sLit "--") <+> pprDefinedAt (getName famInst)])
+ , ifPprDebug (ptext (sLit "RHS:") <+> ppr (famInstRHS famInst)) ])
where
ax = fi_axiom famInst
@@ -199,6 +198,9 @@ pprFamInstHdr fi@(FamInst {fi_flavor = flavor})
else pprTypeApp fam_tc (etad_lhs_tys ++ mkTyVarTys extra_tvs)
-- Without -dppr-debug, eta-expand
-- See Trac #8674
+ -- (This is probably over the top now that we use this
+ -- only for internal debug printing; PprTyThing.pprFamInst
+ -- is used for user-level printing.)
| otherwise
= vanilla_pp_head
@@ -378,23 +380,21 @@ identicalFamInst :: FamInst -> FamInst -> Bool
-- Same LHS, *and* both instances are on the interactive command line
-- Used for overriding in GHCi
identicalFamInst (FamInst { fi_axiom = ax1 }) (FamInst { fi_axiom = ax2 })
- = isInteractiveModule (nameModule (coAxiomName ax1))
- && isInteractiveModule (nameModule (coAxiomName ax2))
- && coAxiomTyCon ax1 == coAxiomTyCon ax2
+ = coAxiomTyCon ax1 == coAxiomTyCon ax2
&& brListLength brs1 == brListLength brs2
- && and (brListZipWith identical_ax_branch brs1 brs2)
- where brs1 = coAxiomBranches ax1
- brs2 = coAxiomBranches ax2
- identical_ax_branch br1 br2
- = length tvs1 == length tvs2
- && length lhs1 == length lhs2
- && and (zipWith (eqTypeX rn_env) lhs1 lhs2)
- where
- tvs1 = coAxBranchTyVars br1
- tvs2 = coAxBranchTyVars br2
- lhs1 = coAxBranchLHS br1
- lhs2 = coAxBranchLHS br2
- rn_env = rnBndrs2 (mkRnEnv2 emptyInScopeSet) tvs1 tvs2
+ && and (brListZipWith identical_branch brs1 brs2)
+ where
+ brs1 = coAxiomBranches ax1
+ brs2 = coAxiomBranches ax2
+
+ identical_branch br1 br2
+ = isJust (tcMatchTys tvs1 lhs1 lhs2)
+ && isJust (tcMatchTys tvs2 lhs2 lhs1)
+ where
+ tvs1 = mkVarSet (coAxBranchTyVars br1)
+ tvs2 = mkVarSet (coAxBranchTyVars br2)
+ lhs1 = coAxBranchLHS br1
+ lhs2 = coAxBranchLHS br2
\end{code}
%************************************************************************
@@ -439,7 +439,7 @@ only when we can be sure that 'a' is not Int.
To achieve this, after finding a possible match within the equations, we have to
go back to all previous equations and check that, under the
substitution induced by the match, other branches are surely apart. (See
-[Apartness].) This is similar to what happens with class
+Note [Apartness].) This is similar to what happens with class
instance selection, when we need to guarantee that there is only a match and
no unifiers. The exact algorithm is different here because the the
potentially-overlapping group is closed.
@@ -475,6 +475,7 @@ irrelevant (clause 1 of compatible) or benign (clause 2 of compatible).
\begin{code}
+-- See Note [Compatibility]
compatibleBranches :: CoAxBranch -> CoAxBranch -> Bool
compatibleBranches (CoAxBranch { cab_lhs = lhs1, cab_rhs = rhs1 })
(CoAxBranch { cab_lhs = lhs2, cab_rhs = rhs2 })
@@ -487,6 +488,7 @@ compatibleBranches (CoAxBranch { cab_lhs = lhs1, cab_rhs = rhs1 })
-- takes a CoAxiom with unknown branch incompatibilities and computes
-- the compatibilities
+-- See Note [Storing compatibility] in CoAxiom
computeAxiomIncomps :: CoAxiom br -> CoAxiom br
computeAxiomIncomps ax@(CoAxiom { co_ax_branches = branches })
= ax { co_ax_branches = go [] branches }
@@ -639,7 +641,7 @@ lookupFamInstEnvConflicts envs fam_inst@(FamInst { fi_axiom = new_axiom })
(ppr tpl_tvs <+> ppr tpl_tys) )
-- Unification will break badly if the variables overlap
-- They shouldn't because we allocate separate uniques for them
- if compatibleBranches (coAxiomSingleBranch old_axiom) (new_branch)
+ if compatibleBranches (coAxiomSingleBranch old_axiom) new_branch
then Nothing
else Just noSubst
-- Note [Family instance overlap conflicts]
@@ -667,7 +669,7 @@ Note [Family instance overlap conflicts]
-- Might be a one-way match or a unifier
type MatchFun = FamInst -- The FamInst template
-> TyVarSet -> [Type] -- fi_tvs, fi_tys of that FamInst
- -> [Type] -- Target to match against
+ -> [Type] -- Target to match against
-> Maybe TvSubst
lookup_fam_inst_env' -- The worker, local to this module
@@ -727,9 +729,9 @@ lookup_fam_inst_env -- The worker, local to this module
-- Precondition: the tycon is saturated (or over-saturated)
-lookup_fam_inst_env match_fun (pkg_ie, home_ie) fam tys =
- lookup_fam_inst_env' match_fun home_ie fam tys ++
- lookup_fam_inst_env' match_fun pkg_ie fam tys
+lookup_fam_inst_env match_fun (pkg_ie, home_ie) fam tys
+ = lookup_fam_inst_env' match_fun home_ie fam tys
+ ++ lookup_fam_inst_env' match_fun pkg_ie fam tys
\end{code}
@@ -745,16 +747,18 @@ which you can't do in Haskell!):
Then looking up (F (Int,Bool) Char) will return a FamInstMatch
(FPair, [Int,Bool,Char])
-
The "extra" type argument [Char] just stays on the end.
-Because of eta-reduction of data family instances (see
-Note [Eta reduction for data family axioms] in TcInstDecls), we must
-handle data families and type families separately here. All instances
-of a type family must have the same arity, so we can precompute the split
-between the match_tys and the overflow tys. This is done in pre_rough_split_tys.
-For data instances, though, we need to re-split for each instance, because
-the breakdown might be different.
+We handle data families and type families separately here:
+
+ * For type families, all instances of a type family must have the
+ same arity, so we can precompute the split between the match_tys
+ and the overflow tys. This is done in pre_rough_split_tys.
+
+ * For data family instances, though, we need to re-split for each
+ instance, because the breakdown might be different for each
+ instance. Why? Because of eta reduction; see Note [Eta reduction
+ for data family axioms]
\begin{code}
@@ -957,9 +961,9 @@ normaliseTcApp env role tc tys
| otherwise -- No unique matching family instance exists;
-- we do not do anything
- = (Refl role ty, ty)
- where
- ty = mkTyConApp tc tys
+ = let (co, ntys) = normaliseTcArgs env role tc tys in
+ (co, mkTyConApp tc ntys)
+
---------------
normaliseTcArgs :: FamInstEnvs -- environment with family instances
diff --git a/compiler/types/InstEnv.lhs b/compiler/types/InstEnv.lhs
index 826537db17bb..708fef1cfe87 100644
--- a/compiler/types/InstEnv.lhs
+++ b/compiler/types/InstEnv.lhs
@@ -7,13 +7,16 @@
The bits common to TcInstDcls and TcDeriv.
\begin{code}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
+
module InstEnv (
- DFunId, OverlapFlag(..), InstMatch, ClsInstLookupResult,
- ClsInst(..), DFunInstType, pprInstance, pprInstanceHdr, pprInstances,
+ DFunId, InstMatch, ClsInstLookupResult,
+ OverlapFlag(..), OverlapMode(..), setOverlapModeMaybe,
+ ClsInst(..), DFunInstType, pprInstance, pprInstanceHdr, pprInstances,
instanceHead, instanceSig, mkLocalInstance, mkImportedInstance,
instanceDFunId, tidyClsInstDFun, instanceRoughTcs,
- InstEnv, emptyInstEnv, extendInstEnv, overwriteInstEnv,
+ InstEnv, emptyInstEnv, extendInstEnv, deleteFromInstEnv, identicalInstHead,
extendInstEnvList, lookupUniqueInstEnv, lookupInstEnv', lookupInstEnv, instEnvElts,
classInstances, orphNamesOfClsInst, instanceBindFun,
instanceCantMatch, roughMatchTcs
@@ -157,22 +160,21 @@ pprInstance :: ClsInst -> SDoc
-- Prints the ClsInst as an instance declaration
pprInstance ispec
= hang (pprInstanceHdr ispec)
- 2 (ptext (sLit "--") <+> pprDefinedAt (getName ispec))
+ 2 (vcat [ ptext (sLit "--") <+> pprDefinedAt (getName ispec)
+ , ifPprDebug (ppr (is_dfun ispec)) ])
-- * pprInstanceHdr is used in VStudio to populate the ClassView tree
pprInstanceHdr :: ClsInst -> SDoc
-- Prints the ClsInst as an instance declaration
pprInstanceHdr (ClsInst { is_flag = flag, is_dfun = dfun })
= getPprStyle $ \ sty ->
- let theta_to_print
- | debugStyle sty = theta
- | otherwise = drop (dfunNSilent dfun) theta
+ let dfun_ty = idType dfun
+ (tvs, theta, res_ty) = tcSplitSigmaTy dfun_ty
+ theta_to_print = drop (dfunNSilent dfun) theta
-- See Note [Silent superclass arguments] in TcInstDcls
- in ptext (sLit "instance") <+> ppr flag
- <+> sep [pprThetaArrowTy theta_to_print, ppr res_ty]
- where
- (_, theta, res_ty) = tcSplitSigmaTy (idType dfun)
- -- Print without the for-all, which the programmer doesn't write
+ ty_to_print | debugStyle sty = dfun_ty
+ | otherwise = mkSigmaTy tvs theta_to_print res_ty
+ in ptext (sLit "instance") <+> ppr flag <+> pprSigmaType ty_to_print
pprInstances :: [ClsInst] -> SDoc
pprInstances ispecs = vcat (map pprInstance ispecs)
@@ -419,26 +421,22 @@ extendInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm })
where
add (ClsIE cur_insts) _ = ClsIE (ins_item : cur_insts)
-overwriteInstEnv :: InstEnv -> ClsInst -> InstEnv
-overwriteInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm, is_tys = tys })
- = addToUFM_C add inst_env cls_nm (ClsIE [ins_item])
+deleteFromInstEnv :: InstEnv -> ClsInst -> InstEnv
+deleteFromInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm })
+ = adjustUFM adjust inst_env cls_nm
where
- add (ClsIE cur_insts) _ = ClsIE (replaceInst cur_insts)
-
- rough_tcs = roughMatchTcs tys
- replaceInst [] = [ins_item]
- replaceInst (item@(ClsInst { is_tcs = mb_tcs, is_tvs = tpl_tvs
- , is_tys = tpl_tys }) : rest)
- -- Fast check for no match, uses the "rough match" fields
- | instanceCantMatch rough_tcs mb_tcs
- = item : replaceInst rest
-
- | let tpl_tv_set = mkVarSet tpl_tvs
- , Just _ <- tcMatchTys tpl_tv_set tpl_tys tys
- = ins_item : rest
-
- | otherwise
- = item : replaceInst rest
+ adjust (ClsIE items) = ClsIE (filterOut (identicalInstHead ins_item) items)
+
+identicalInstHead :: ClsInst -> ClsInst -> Bool
+-- ^ True when the instance heads are the same
+-- e.g. both are Eq [(a,b)]
+-- Obviously should be insensitive to alpha-renaming
+identicalInstHead (ClsInst { is_cls_nm = cls_nm1, is_tcs = rough1, is_tvs = tvs1, is_tys = tys1 })
+ (ClsInst { is_cls_nm = cls_nm2, is_tcs = rough2, is_tvs = tvs2, is_tys = tys2 })
+ = cls_nm1 == cls_nm2
+ && not (instanceCantMatch rough1 rough2) -- Fast check for no match, uses the "rough match" fields
+ && isJust (tcMatchTys (mkVarSet tvs1) tys1 tys2)
+ && isJust (tcMatchTys (mkVarSet tvs2) tys2 tys1)
\end{code}
@@ -452,6 +450,54 @@ overwriteInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm, is_tys = tys }
the env is kept ordered, the first match must be the only one. The
thing we are looking up can have an arbitrary "flexi" part.
+Note [Rules for instance lookup]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+These functions implement the carefully-written rules in the user
+manual section on "overlapping instances". At risk of duplication,
+here are the rules. If the rules change, change this text and the
+user manual simultaneously. The link may be this:
+http://www.haskell.org/ghc/docs/latest/html/users_guide/type-class-extensions.html#instance-overlap
+
+The willingness to be overlapped or incoherent is a property of the
+instance declaration itself, controlled as follows:
+
+ * An instance is "incoherent"
+ if it has an INCOHERENT pragma, or
+ if it appears in a module compiled with -XIncoherentInstances.
+
+ * An instance is "overlappable"
+ if it has an OVERLAPPABLE or OVERLAPS pragma, or
+ if it appears in a module compiled with -XOverlappingInstances, or
+ if the instance is incoherent.
+
+ * An instance is "overlapping"
+ if it has an OVERLAPPING or OVERLAPS pragma, or
+ if it appears in a module compiled with -XOverlappingInstances, or
+ if the instance is incoherent.
+
+
+Now suppose that, in some client module, we are searching for an instance
+of the target constraint (C ty1 .. tyn). The search works like this.
+
+ * Find all instances I that match the target constraint; that is, the
+ target constraint is a substitution instance of I. These instance
+ declarations are the candidates.
+
+ * Find all non-candidate instances that unify with the target
+ constraint. Such non-candidate instances might match when the
+ target constraint is further instantiated. If all of them are
+ incoherent, proceed; if not, the search fails.
+
+ * Eliminate any candidate IX for which both of the following hold:
+ * There is another candidate IY that is strictly more specific;
+ that is, IY is a substitution instance of IX but not vice versa.
+
+ * Either IX is overlappable or IY is overlapping.
+
+ * If only one candidate remains, pick it. Otherwise if all remaining
+ candidates are incoherent, pick an arbitrary candidate. Otherwise fail.
+
+
\begin{code}
type DFunInstType = Maybe Type
-- Just ty => Instantiate with this type
@@ -535,8 +581,8 @@ lookupInstEnv' ie cls tys
= find ((item, map (lookup_tv subst) tpl_tvs) : ms) us rest
-- Does not match, so next check whether the things unify
- -- See Note [Overlapping instances] and Note [Incoherent Instances]
- | Incoherent _ <- oflag
+ -- See Note [Overlapping instances] and Note [Incoherent instances]
+ | Incoherent <- overlapMode oflag
= find ms us rest
| otherwise
@@ -565,23 +611,30 @@ lookupInstEnv' ie cls tys
lookupInstEnv :: (InstEnv, InstEnv) -- External and home package inst-env
-> Class -> [Type] -- What we are looking for
-> ClsInstLookupResult
-
+-- ^ See Note [Rules for instance lookup]
lookupInstEnv (pkg_ie, home_ie) cls tys
- = (safe_matches, all_unifs, safe_fail)
+ = (final_matches, final_unifs, safe_fail)
where
(home_matches, home_unifs) = lookupInstEnv' home_ie cls tys
(pkg_matches, pkg_unifs) = lookupInstEnv' pkg_ie cls tys
all_matches = home_matches ++ pkg_matches
all_unifs = home_unifs ++ pkg_unifs
pruned_matches = foldr insert_overlapping [] all_matches
- (safe_matches, safe_fail) = if length pruned_matches == 1
- then check_safe (head pruned_matches) all_matches
- else (pruned_matches, False)
-- Even if the unifs is non-empty (an error situation)
-- we still prune the matches, so that the error message isn't
-- misleading (complaining of multiple matches when some should be
-- overlapped away)
+ (final_matches, safe_fail)
+ = case pruned_matches of
+ [match] -> check_safe match all_matches
+ _ -> (pruned_matches, False)
+
+ -- If the selected match is incoherent, discard all unifiers
+ final_unifs = case final_matches of
+ (m:_) | is_incoherent m -> []
+ _ -> all_unifs
+
-- NOTE [Safe Haskell isSafeOverlap]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- We restrict code compiled in 'Safe' mode from overriding code
@@ -605,7 +658,7 @@ lookupInstEnv (pkg_ie, home_ie) cls tys
if inSameMod x
then go bad unchecked
else go (i:bad) unchecked
-
+
inSameMod b =
let na = getName $ getName inst
la = isInternalName na
@@ -614,64 +667,72 @@ lookupInstEnv (pkg_ie, home_ie) cls tys
in (la && lb) || (nameModule na == nameModule nb)
---------------
+is_incoherent :: InstMatch -> Bool
+is_incoherent (inst, _) = overlapMode (is_flag inst) == Incoherent
+
---------------
insert_overlapping :: InstMatch -> [InstMatch] -> [InstMatch]
--- Add a new solution, knocking out strictly less specific ones
+-- ^ Add a new solution, knocking out strictly less specific ones
+-- See Note [Rules for instance lookup]
insert_overlapping new_item [] = [new_item]
-insert_overlapping new_item (item:items)
- | new_beats_old && old_beats_new = item : insert_overlapping new_item items
- -- Duplicate => keep both for error report
- | new_beats_old = insert_overlapping new_item items
- -- Keep new one
- | old_beats_new = item : items
- -- Keep old one
- | incoherent new_item = item : items -- note [Incoherent instances]
- -- Keep old one
- | incoherent item = new_item : items
- -- Keep new one
- | otherwise = item : insert_overlapping new_item items
- -- Keep both
+insert_overlapping new_item (old_item : old_items)
+ | new_beats_old -- New strictly overrides old
+ , not old_beats_new
+ , new_item `can_override` old_item
+ = insert_overlapping new_item old_items
+
+ | old_beats_new -- Old strictly overrides new
+ , not new_beats_old
+ , old_item `can_override` new_item
+ = old_item : old_items
+
+ -- Discard incoherent instances; see Note [Incoherent instances]
+ | is_incoherent old_item -- Old is incoherent; discard it
+ = insert_overlapping new_item old_items
+ | is_incoherent new_item -- New is incoherent; discard it
+ = old_item : old_items
+
+ -- Equal or incomparable, and neither is incoherent; keep both
+ | otherwise
+ = old_item : insert_overlapping new_item old_items
where
- new_beats_old = new_item `beats` item
- old_beats_new = item `beats` new_item
-
- incoherent (inst, _) = case is_flag inst of Incoherent _ -> True
- _ -> False
-
- (instA, _) `beats` (instB, _)
- = overlap_ok &&
- isJust (tcMatchTys (mkVarSet (is_tvs instB)) (is_tys instB) (is_tys instA))
- -- A beats B if A is more specific than B,
- -- (ie. if B can be instantiated to match A)
- -- and overlap is permitted
- where
- -- Overlap permitted if *either* instance permits overlap
- -- This is a change (Trac #3877, Dec 10). It used to
- -- require that instB (the less specific one) permitted overlap.
- overlap_ok = case (is_flag instA, is_flag instB) of
- (NoOverlap _, NoOverlap _) -> False
- _ -> True
+
+ new_beats_old = new_item `more_specific_than` old_item
+ old_beats_new = old_item `more_specific_than` new_item
+
+ -- `instB` can be instantiated to match `instA`
+ -- or the two are equal
+ (instA,_) `more_specific_than` (instB,_)
+ = isJust (tcMatchTys (mkVarSet (is_tvs instB))
+ (is_tys instB) (is_tys instA))
+
+ (instA, _) `can_override` (instB, _)
+ = hasOverlappingFlag (overlapMode (is_flag instA))
+ || hasOverlappableFlag (overlapMode (is_flag instB))
+ -- Overlap permitted if either the more specific instance
+ -- is marked as overlapping, or the more general one is
+ -- marked as overlappable.
+ -- Latest change described in: Trac #9242.
+ -- Previous change: Trac #3877, Dec 10.
\end{code}
Note [Incoherent instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-For some classes, the choise of a particular instance does not matter, any one
+For some classes, the choice of a particular instance does not matter, any one
is good. E.g. consider
class D a b where { opD :: a -> b -> String }
instance D Int b where ...
instance D a Int where ...
- g (x::Int) = opD x x
+ g (x::Int) = opD x x -- Wanted: D Int Int
For such classes this should work (without having to add an "instance D Int
Int", and using -XOverlappingInstances, which would then work). This is what
-XIncoherentInstances is for: Telling GHC "I don't care which instance you use;
if you can use one, use it."
-
-Should this logic only work when all candidates have the incoherent flag, or
+Should this logic only work when *all* candidates have the incoherent flag, or
even when all but one have it? The right choice is the latter, which can be
justified by comparing the behaviour with how -XIncoherentInstances worked when
it was only about the unify-check (note [Overlapping instances]):
@@ -682,7 +743,7 @@ Example:
instance [incoherent] [Int] b c
instance [incoherent] C a Int c
Thanks to the incoherent flags,
- foo :: ([a],b,Int)
+ [Wanted] C [a] b Int
works: Only instance one matches, the others just unify, but are marked
incoherent.
diff --git a/compiler/types/Kind.lhs b/compiler/types/Kind.lhs
index 793aa4a761db..04982825ac00 100644
--- a/compiler/types/Kind.lhs
+++ b/compiler/types/Kind.lhs
@@ -3,19 +3,13 @@
%
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
+{-# LANGUAGE CPP #-}
module Kind (
-- * Main data type
SuperKind, Kind, typeKind,
- -- Kinds
- anyKind, liftedTypeKind, unliftedTypeKind, openTypeKind, constraintKind,
+ -- Kinds
+ anyKind, liftedTypeKind, unliftedTypeKind, openTypeKind, constraintKind,
mkArrowKind, mkArrowKinds,
-- Kind constructors...
@@ -23,9 +17,9 @@ module Kind (
unliftedTypeKindTyCon, constraintKindTyCon,
-- Super Kinds
- superKind, superKindTyCon,
-
- pprKind, pprParendKind,
+ superKind, superKindTyCon,
+
+ pprKind, pprParendKind,
-- ** Deconstructing Kinds
kindAppResult, synTyConResKind,
@@ -41,7 +35,7 @@ module Kind (
okArrowArgKind, okArrowResultKind,
isSubOpenTypeKind, isSubOpenTypeKindKey,
- isSubKind, isSubKindCon,
+ isSubKind, isSubKindCon,
tcIsSubKind, tcIsSubKindCon,
defaultKind, defaultKind_maybe,
@@ -62,48 +56,54 @@ import PrelNames
import Outputable
import Maybes( orElse )
import Util
+import FastString
\end{code}
%************************************************************************
-%* *
- Functions over Kinds
-%* *
+%* *
+ Functions over Kinds
+%* *
%************************************************************************
Note [Kind Constraint and kind *]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The kind Constraint is the kind of classes and other type constraints.
-The special thing about types of kind Constraint is that
+The special thing about types of kind Constraint is that
* They are displayed with double arrow:
f :: Ord a => a -> a
* They are implicitly instantiated at call sites; so the type inference
engine inserts an extra argument of type (Ord a) at every call site
to f.
-However, once type inference is over, there is *no* distinction between
+However, once type inference is over, there is *no* distinction between
Constraint and *. Indeed we can have coercions between the two. Consider
class C a where
op :: a -> a
-For this single-method class we may generate a newtype, which in turn
+For this single-method class we may generate a newtype, which in turn
generates an axiom witnessing
Ord a ~ (a -> a)
so on the left we have Constraint, and on the right we have *.
See Trac #7451.
Bottom line: although '*' and 'Constraint' are distinct TyCons, with
-distinct uniques, they are treated as equal at all times except
+distinct uniques, they are treated as equal at all times except
during type inference. Hence cmpTc treats them as equal.
\begin{code}
-- | Essentially 'funResultTy' on kinds handling pi-types too
-kindFunResult :: Kind -> KindOrType -> Kind
-kindFunResult (FunTy _ res) _ = res
-kindFunResult (ForAllTy kv res) arg = substKiWith [kv] [arg] res
-kindFunResult k _ = pprPanic "kindFunResult" (ppr k)
-
-kindAppResult :: Kind -> [Type] -> Kind
-kindAppResult k [] = k
-kindAppResult k (a:as) = kindAppResult (kindFunResult k a) as
+kindFunResult :: SDoc -> Kind -> KindOrType -> Kind
+kindFunResult _ (FunTy _ res) _ = res
+kindFunResult _ (ForAllTy kv res) arg = substKiWith [kv] [arg] res
+#ifdef DEBUG
+kindFunResult doc k _ = pprPanic "kindFunResult" (ppr k $$ doc)
+#else
+-- Without DEBUG, doc becomes an unused arg, and will be optimised away
+kindFunResult _ _ _ = panic "kindFunResult"
+#endif
+
+kindAppResult :: SDoc -> Kind -> [Type] -> Kind
+kindAppResult _ k [] = k
+kindAppResult doc k (a:as) = kindAppResult doc (kindFunResult doc k a) as
-- | Essentially 'splitFunTys' on kinds
splitKindFunTys :: Kind -> ([Kind],Kind)
@@ -122,12 +122,13 @@ splitKindFunTysN n (FunTy a r) = case splitKindFunTysN (n-1) r of
(as, k) -> (a:as, k)
splitKindFunTysN n k = pprPanic "splitKindFunTysN" (ppr n <+> ppr k)
--- | Find the result 'Kind' of a type synonym,
+-- | Find the result 'Kind' of a type synonym,
-- after applying it to its 'arity' number of type variables
--- Actually this function works fine on data types too,
+-- Actually this function works fine on data types too,
-- but they'd always return '*', so we never need to ask
synTyConResKind :: TyCon -> Kind
-synTyConResKind tycon = kindAppResult (tyConKind tycon) (map mkTyVarTy (tyConTyVars tycon))
+synTyConResKind tycon = kindAppResult (ptext (sLit "synTyConResKind") <+> ppr tycon)
+ (tyConKind tycon) (map mkTyVarTy (tyConTyVars tycon))
-- | See "Type#kind_subtyping" for details of the distinction between these 'Kind's
isOpenTypeKind, isUnliftedTypeKind,
@@ -204,7 +205,7 @@ isSubOpenTypeKindKey uniq
|| uniq == constraintKindTyConKey -- Needed for error (Num a) "blah"
-- and so that (Ord a -> Eq a) is well-kinded
-- and so that (# Eq a, Ord b #) is well-kinded
- -- See Note [Kind Constraint and kind *]
+ -- See Note [Kind Constraint and kind *]
-- | Is this a kind (i.e. a type-of-types)?
isKind :: Kind -> Bool
@@ -235,7 +236,7 @@ isSubKindCon :: TyCon -> TyCon -> Bool
-- See Note [GHC Formalism] in coreSyn/CoreLint.lhs
isSubKindCon kc1 kc2
| kc1 == kc2 = True
- | isOpenTypeKindCon kc2 = isSubOpenTypeKindCon kc1
+ | isOpenTypeKindCon kc2 = isSubOpenTypeKindCon kc1
| isConstraintKindCon kc1 = isLiftedTypeKindCon kc2
| isLiftedTypeKindCon kc1 = isConstraintKindCon kc2
-- See Note [Kind Constraint and kind *]
@@ -279,11 +280,11 @@ defaultKind_maybe :: Kind -> Maybe Kind
-- simple (* or *->* etc). So generic type variables (other than
-- built-in constants like 'error') always have simple kinds. This is important;
-- consider
--- f x = True
+-- f x = True
-- We want f to get type
--- f :: forall (a::*). a -> Bool
--- Not
--- f :: forall (a::ArgKind). a -> Bool
+-- f :: forall (a::*). a -> Bool
+-- Not
+-- f :: forall (a::ArgKind). a -> Bool
-- because that would allow a call like (f 3#) as well as (f True),
-- and the calling conventions differ.
-- This defaulting is done in TcMType.zonkTcTyVarBndr.
diff --git a/compiler/types/OptCoercion.lhs b/compiler/types/OptCoercion.lhs
index ebc2cbea5c3e..6eccf4258889 100644
--- a/compiler/types/OptCoercion.lhs
+++ b/compiler/types/OptCoercion.lhs
@@ -3,7 +3,8 @@
%
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -26,12 +27,12 @@ import VarEnv
import StaticFlags ( opt_NoOptCoercion )
import Outputable
import Pair
-import Maybes
import FastString
import Util
import Unify
import ListSetOps
import InstEnv
+import Control.Monad ( zipWithM )
\end{code}
%************************************************************************
@@ -57,13 +58,29 @@ because now the co_B1 (which is really free) has been captured, and
subsequent substitutions will go wrong. That's why we can't use
mkCoPredTy in the ForAll case, where this note appears.
+Note [Optimising coercion optimisation]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Looking up a coercion's role or kind is linear in the size of the
+coercion. Thus, doing this repeatedly during the recursive descent
+of coercion optimisation is disastrous. We must be careful to avoid
+doing this if at all possible.
+
+Because it is generally easy to know a coercion's components' roles
+from the role of the outer coercion, we pass down the known role of
+the input in the algorithm below. We also keep functions opt_co2
+and opt_co3 separate from opt_co4, so that the former two do Phantom
+checks that opt_co4 can avoid. This is a big win because Phantom coercions
+rarely appear within non-phantom coercions -- only in some TyConAppCos
+and some AxiomInstCos. We handle these cases specially by calling
+opt_co2.
+
\begin{code}
optCoercion :: CvSubst -> Coercion -> NormalCo
-- ^ optCoercion applies a substitution to a coercion,
-- *and* optimises it to reduce its size
optCoercion env co
| opt_NoOptCoercion = substCo env co
- | otherwise = opt_co env False Nothing co
+ | otherwise = opt_co1 env False co
type NormalCo = Coercion
-- Invariants:
@@ -74,20 +91,24 @@ type NormalCo = Coercion
type NormalNonIdCo = NormalCo -- Extra invariant: not the identity
-opt_co, opt_co' :: CvSubst
- -> Bool -- True <=> return (sym co)
- -> Maybe Role -- Nothing <=> don't change; otherwise, change
- -- INVARIANT: the change is always a *downgrade*
- -> Coercion
- -> NormalCo
-opt_co = opt_co'
+-- | Do we apply a @sym@ to the result?
+type SymFlag = Bool
+
+-- | Do we force the result to be representational?
+type ReprFlag = Bool
+
+-- | Optimize a coercion, making no assumptions.
+opt_co1 :: CvSubst
+ -> SymFlag
+ -> Coercion -> NormalCo
+opt_co1 env sym co = opt_co2 env sym (coercionRole co) co
{-
opt_co env sym co
= pprTrace "opt_co {" (ppr sym <+> ppr co $$ ppr env) $
co1 `seq`
pprTrace "opt_co done }" (ppr co1) $
- (WARN( not same_co_kind, ppr co <+> dcolon <+> pprEqPred (Pair s1 t1)
- $$ ppr co1 <+> dcolon <+> pprEqPred (Pair s2 t2) )
+ (WARN( not same_co_kind, ppr co <+> dcolon <+> ppr (coercionType co)
+ $$ ppr co1 <+> dcolon <+> ppr (coercionType co1) )
WARN( not (coreEqCoercion co1 simple_result),
(text "env=" <+> ppr env) $$
(text "input=" <+> ppr co) $$
@@ -106,111 +127,123 @@ opt_co env sym co
| otherwise = substCo env co
-}
-opt_co' env _ mrole (Refl r ty) = Refl (mrole `orElse` r) (substTy env ty)
-opt_co' env sym mrole co
- | mrole == Just Phantom
- || coercionRole co == Phantom
- , Pair ty1 ty2 <- coercionKind co
- = if sym
- then opt_univ env Phantom ty2 ty1
- else opt_univ env Phantom ty1 ty2
-
-opt_co' env sym mrole (SymCo co) = opt_co env (not sym) mrole co
-opt_co' env sym mrole (TyConAppCo r tc cos)
- = case mrole of
- Nothing -> mkTyConAppCo r tc (map (opt_co env sym Nothing) cos)
- Just r' -> mkTyConAppCo r' tc (zipWith (opt_co env sym)
- (map Just (tyConRolesX r' tc)) cos)
-opt_co' env sym mrole (AppCo co1 co2) = mkAppCo (opt_co env sym mrole co1)
- (opt_co env sym Nothing co2)
-opt_co' env sym mrole (ForAllCo tv co)
+-- See Note [Optimising coercion optimisation]
+-- | Optimize a coercion, knowing the coercion's role. No other assumptions.
+opt_co2 :: CvSubst
+ -> SymFlag
+ -> Role -- ^ The role of the input coercion
+ -> Coercion -> NormalCo
+opt_co2 env sym Phantom co = opt_phantom env sym co
+opt_co2 env sym r co = opt_co3 env sym Nothing r co
+
+-- See Note [Optimising coercion optimisation]
+-- | Optimize a coercion, knowing the coercion's non-Phantom role.
+opt_co3 :: CvSubst -> SymFlag -> Maybe Role -> Role -> Coercion -> NormalCo
+opt_co3 env sym (Just Phantom) _ co = opt_phantom env sym co
+opt_co3 env sym (Just Representational) r co = opt_co4 env sym True r co
+ -- if mrole is Just Nominal, that can't be a downgrade, so we can ignore
+opt_co3 env sym _ r co = opt_co4 env sym False r co
+
+
+-- See Note [Optimising coercion optimisation]
+-- | Optimize a non-phantom coercion.
+opt_co4 :: CvSubst -> SymFlag -> ReprFlag -> Role -> Coercion -> NormalCo
+
+opt_co4 env _ rep r (Refl _r ty)
+ = ASSERT( r == _r )
+ Refl (chooseRole rep r) (substTy env ty)
+
+opt_co4 env sym rep r (SymCo co) = opt_co4 env (not sym) rep r co
+
+opt_co4 env sym rep r g@(TyConAppCo _r tc cos)
+ = ASSERT( r == _r )
+ case (rep, r) of
+ (True, Nominal) ->
+ mkTyConAppCo Representational tc
+ (zipWith3 (opt_co3 env sym)
+ (map Just (tyConRolesX Representational tc))
+ (repeat Nominal)
+ cos)
+ (False, Nominal) ->
+ mkTyConAppCo Nominal tc (map (opt_co4 env sym False Nominal) cos)
+ (_, Representational) ->
+ -- must use opt_co2 here, because some roles may be P
+ -- See Note [Optimising coercion optimisation]
+ mkTyConAppCo r tc (zipWith (opt_co2 env sym)
+ (tyConRolesX r tc) -- the current roles
+ cos)
+ (_, Phantom) -> pprPanic "opt_co4 sees a phantom!" (ppr g)
+
+opt_co4 env sym rep r (AppCo co1 co2) = mkAppCo (opt_co4 env sym rep r co1)
+ (opt_co4 env sym False Nominal co2)
+opt_co4 env sym rep r (ForAllCo tv co)
= case substTyVarBndr env tv of
- (env', tv') -> mkForAllCo tv' (opt_co env' sym mrole co)
+ (env', tv') -> mkForAllCo tv' (opt_co4 env' sym rep r co)
-- Use the "mk" functions to check for nested Refls
-opt_co' env sym mrole (CoVarCo cv)
+opt_co4 env sym rep r (CoVarCo cv)
| Just co <- lookupCoVar env cv
- = opt_co (zapCvSubstEnv env) sym mrole co
+ = opt_co4 (zapCvSubstEnv env) sym rep r co
| Just cv1 <- lookupInScope (getCvInScope env) cv
- = ASSERT( isCoVar cv1 ) wrapRole mrole cv_role $ wrapSym sym (CoVarCo cv1)
+ = ASSERT( isCoVar cv1 ) wrapRole rep r $ wrapSym sym (CoVarCo cv1)
-- cv1 might have a substituted kind!
| otherwise = WARN( True, ptext (sLit "opt_co: not in scope:") <+> ppr cv $$ ppr env)
ASSERT( isCoVar cv )
- wrapRole mrole cv_role $ wrapSym sym (CoVarCo cv)
- where cv_role = coVarRole cv
+ wrapRole rep r $ wrapSym sym (CoVarCo cv)
-opt_co' env sym mrole (AxiomInstCo con ind cos)
+opt_co4 env sym rep r (AxiomInstCo con ind cos)
-- Do *not* push sym inside top-level axioms
-- e.g. if g is a top-level axiom
-- g a : f a ~ a
-- then (sym (g ty)) /= g (sym ty) !!
- = wrapRole mrole (coAxiomRole con) $
+ = ASSERT( r == coAxiomRole con )
+ wrapRole rep (coAxiomRole con) $
wrapSym sym $
- AxiomInstCo con ind (map (opt_co env False Nothing) cos)
+ -- some sub-cos might be P: use opt_co2
+ -- See Note [Optimising coercion optimisation]
+ AxiomInstCo con ind (zipWith (opt_co2 env False)
+ (coAxBranchRoles (coAxiomNthBranch con ind))
+ cos)
-- Note that the_co does *not* have sym pushed into it
-opt_co' env sym mrole (UnivCo r oty1 oty2)
- = opt_univ env role a b
+opt_co4 env sym rep r (UnivCo _r oty1 oty2)
+ = ASSERT( r == _r )
+ opt_univ env (chooseRole rep r) a b
where
(a,b) = if sym then (oty2,oty1) else (oty1,oty2)
- role = mrole `orElse` r
-opt_co' env sym mrole (TransCo co1 co2)
- | sym = opt_trans in_scope opt_co2 opt_co1 -- sym (g `o` h) = sym h `o` sym g
- | otherwise = opt_trans in_scope opt_co1 opt_co2
+opt_co4 env sym rep r (TransCo co1 co2)
+ -- sym (g `o` h) = sym h `o` sym g
+ | sym = opt_trans in_scope co2' co1'
+ | otherwise = opt_trans in_scope co1' co2'
where
- opt_co1 = opt_co env sym mrole co1
- opt_co2 = opt_co env sym mrole co2
+ co1' = opt_co4 env sym rep r co1
+ co2' = opt_co4 env sym rep r co2
in_scope = getCvInScope env
--- NthCo roles are fiddly!
-opt_co' env sym mrole (NthCo n (TyConAppCo _ _ cos))
- = opt_co env sym mrole (getNth cos n)
-opt_co' env sym mrole (NthCo n co)
- | TyConAppCo _ _tc cos <- co'
- , isDecomposableTyCon tc -- Not synonym families
- = ASSERT( n < length cos )
- ASSERT( _tc == tc )
- let resultCo = cos !! n
- resultRole = coercionRole resultCo in
- case (mrole, resultRole) of
- -- if we just need an R coercion, try to propagate the SubCo again:
- (Just Representational, Nominal) -> opt_co (zapCvSubstEnv env) False mrole resultCo
- _ -> resultCo
-
- | otherwise
- = wrap_role $ NthCo n co'
-
- where
- wrap_role wrapped = wrapRole mrole (coercionRole wrapped) wrapped
-
- tc = tyConAppTyCon $ pFst $ coercionKind co
- co' = opt_co env sym mrole' co
- mrole' = case mrole of
- Just Representational
- | Representational <- nthRole Representational tc n
- -> Just Representational
- _ -> Nothing
+opt_co4 env sym rep r co@(NthCo {}) = opt_nth_co env sym rep r co
-opt_co' env sym mrole (LRCo lr co)
+opt_co4 env sym rep r (LRCo lr co)
| Just pr_co <- splitAppCo_maybe co
- = opt_co env sym mrole (pickLR lr pr_co)
+ = ASSERT( r == Nominal )
+ opt_co4 env sym rep Nominal (pickLR lr pr_co)
| Just pr_co <- splitAppCo_maybe co'
- = if mrole == Just Representational
- then opt_co (zapCvSubstEnv env) False mrole (pickLR lr pr_co)
+ = ASSERT( r == Nominal )
+ if rep
+ then opt_co4 (zapCvSubstEnv env) False True Nominal (pickLR lr pr_co)
else pickLR lr pr_co
| otherwise
- = wrapRole mrole Nominal $ LRCo lr co'
+ = wrapRole rep Nominal $ LRCo lr co'
where
- co' = opt_co env sym Nothing co
+ co' = opt_co4 env sym False Nominal co
-opt_co' env sym mrole (InstCo co ty)
+opt_co4 env sym rep r (InstCo co ty)
-- See if the first arg is already a forall
-- ...then we can just extend the current substitution
| Just (tv, co_body) <- splitForAllCo_maybe co
- = opt_co (extendTvSubst env tv ty') sym mrole co_body
+ = opt_co4 (extendTvSubst env tv ty') sym rep r co_body
-- See if it is a forall after optimization
-- If so, do an inefficient one-variable substitution
@@ -219,22 +252,34 @@ opt_co' env sym mrole (InstCo co ty)
| otherwise = InstCo co' ty'
where
- co' = opt_co env sym mrole co
+ co' = opt_co4 env sym rep r co
ty' = substTy env ty
-opt_co' env sym _ (SubCo co) = opt_co env sym (Just Representational) co
+opt_co4 env sym _ r (SubCo co)
+ = ASSERT( r == Representational )
+ opt_co4 env sym True Nominal co
-- XXX: We could add another field to CoAxiomRule that
-- would allow us to do custom simplifications.
-opt_co' env sym mrole (AxiomRuleCo co ts cs) =
- wrapRole mrole (coaxrRole co) $
+opt_co4 env sym rep r (AxiomRuleCo co ts cs)
+ = ASSERT( r == coaxrRole co )
+ wrapRole rep r $
wrapSym sym $
AxiomRuleCo co (map (substTy env) ts)
- (zipWith (opt_co env False) (map Just (coaxrAsmpRoles co)) cs)
-
+ (zipWith (opt_co2 env False) (coaxrAsmpRoles co) cs)
-------------
+-- | Optimize a phantom coercion. The input coercion may not necessarily
+-- be a phantom, but the output sure will be.
+opt_phantom :: CvSubst -> SymFlag -> Coercion -> NormalCo
+opt_phantom env sym co
+ = if sym
+ then opt_univ env Phantom ty2 ty1
+ else opt_univ env Phantom ty1 ty2
+ where
+ Pair ty1 ty2 = coercionKind co
+
opt_univ :: CvSubst -> Role -> Type -> Type -> Coercion
opt_univ env role oty1 oty2
| Just (tc1, tys1) <- splitTyConApp_maybe oty1
@@ -260,6 +305,45 @@ opt_univ env role oty1 oty2
| otherwise
= mkUnivCo role (substTy env oty1) (substTy env oty2)
+-------------
+-- NthCo must be handled separately, because it's the one case where we can't
+-- tell quickly what the component coercion's role is from the containing
+-- coercion. To avoid repeated coercionRole calls as opt_co1 calls opt_co2,
+-- we just look for nested NthCo's, which can happen in practice.
+opt_nth_co :: CvSubst -> SymFlag -> ReprFlag -> Role -> Coercion -> NormalCo
+opt_nth_co env sym rep r = go []
+ where
+ go ns (NthCo n co) = go (n:ns) co
+ -- previous versions checked if the tycon is decomposable. This
+ -- is redundant, because a non-decomposable tycon under an NthCo
+ -- is entirely bogus. See docs/core-spec/core-spec.pdf.
+ go ns co
+ = opt_nths ns co
+
+ -- input coercion is *not* yet sym'd or opt'd
+ opt_nths [] co = opt_co4 env sym rep r co
+ opt_nths (n:ns) (TyConAppCo _ _ cos) = opt_nths ns (cos `getNth` n)
+
+ -- here, the co isn't a TyConAppCo, so we opt it, hoping to get
+ -- a TyConAppCo as output. We don't know the role, so we use
+ -- opt_co1. This is slightly annoying, because opt_co1 will call
+ -- coercionRole, but as long as we don't have a long chain of
+ -- NthCo's interspersed with some other coercion former, we should
+ -- be OK.
+ opt_nths ns co = opt_nths' ns (opt_co1 env sym co)
+
+ -- input coercion *is* sym'd and opt'd
+ opt_nths' [] co
+ = if rep && (r == Nominal)
+ -- propagate the SubCo:
+ then opt_co4 (zapCvSubstEnv env) False True r co
+ else co
+ opt_nths' (n:ns) (TyConAppCo _ _ cos) = opt_nths' ns (cos `getNth` n)
+ opt_nths' ns co = wrapRole rep r (mk_nths ns co)
+
+ mk_nths [] co = co
+ mk_nths (n:ns) co = mk_nths ns (mkNthCo n co)
+
-------------
opt_transList :: InScopeSet -> [NormalCo] -> [NormalCo] -> [NormalCo]
opt_transList is = zipWith (opt_trans is)
@@ -425,11 +509,11 @@ opt_trans_rule is co1 co2
role = coercionRole co1 -- should be the same as coercionRole co2!
opt_trans_rule _ co1 co2 -- Identity rule
- | Pair ty1 _ <- coercionKind co1
+ | (Pair ty1 _, r) <- coercionKindRole co1
, Pair _ ty2 <- coercionKind co2
, ty1 `eqType` ty2
= fireTransRule "RedTypeDirRefl" co1 co2 $
- Refl (coercionRole co1) ty2
+ Refl r ty2
opt_trans_rule _ _ _ = Nothing
@@ -492,16 +576,24 @@ checkAxInstCo (AxiomInstCo ax ind cos)
checkAxInstCo _ = Nothing
-----------
-wrapSym :: Bool -> Coercion -> Coercion
+wrapSym :: SymFlag -> Coercion -> Coercion
wrapSym sym co | sym = SymCo co
| otherwise = co
-wrapRole :: Maybe Role -- desired
- -> Role -- current
+-- | Conditionally set a role to be representational
+wrapRole :: ReprFlag
+ -> Role -- ^ current role
-> Coercion -> Coercion
-wrapRole Nothing _ = id
-wrapRole (Just desired) current = maybeSubCo2 desired current
-
+wrapRole False _ = id
+wrapRole True current = downgradeRole Representational current
+
+-- | If we require a representational role, return that. Otherwise,
+-- return the "default" role provided.
+chooseRole :: ReprFlag
+ -> Role -- ^ "default" role
+ -> Role
+chooseRole True _ = Representational
+chooseRole _ r = r
-----------
-- takes two tyvars and builds env'ts to map them to the same tyvar
substTyVarBndr2 :: CvSubst -> TyVar -> TyVar
@@ -534,7 +626,7 @@ matchAxiom sym ax@(CoAxiom { co_ax_tc = tc }) ind co
, cab_rhs = rhs }) = coAxiomNthBranch ax ind in
case liftCoMatch (mkVarSet qtvs) (if sym then (mkTyConApp tc lhs) else rhs) co of
Nothing -> Nothing
- Just subst -> allMaybes (zipWith (liftCoSubstTyVar subst) roles qtvs)
+ Just subst -> zipWithM (liftCoSubstTyVar subst) roles qtvs
-------------
compatible_co :: Coercion -> Coercion -> Bool
@@ -568,8 +660,7 @@ etaAppCo_maybe :: Coercion -> Maybe (Coercion,Coercion)
etaAppCo_maybe co
| Just (co1,co2) <- splitAppCo_maybe co
= Just (co1,co2)
- | Nominal <- coercionRole co
- , Pair ty1 ty2 <- coercionKind co
+ | (Pair ty1 ty2, Nominal) <- coercionKindRole co
, Just (_,t1) <- splitAppTy_maybe ty1
, Just (_,t2) <- splitAppTy_maybe ty2
, typeKind t1 `eqType` typeKind t2 -- Note [Eta for AppCo]
diff --git a/compiler/types/TyCon.lhs b/compiler/types/TyCon.lhs
index bb489b33e1b3..65b5645d7419 100644
--- a/compiler/types/TyCon.lhs
+++ b/compiler/types/TyCon.lhs
@@ -6,6 +6,7 @@
The @TyCon@ datatype
\begin{code}
+{-# LANGUAGE CPP, DeriveDataTypeable #-}
module TyCon(
-- * Main TyCon data types
@@ -34,14 +35,13 @@ module TyCon(
isFunTyCon,
isPrimTyCon,
isTupleTyCon, isUnboxedTupleTyCon, isBoxedTupleTyCon,
- isSynTyCon,
+ isSynTyCon, isTypeSynonymTyCon,
isDecomposableTyCon,
isForeignTyCon,
isPromotedDataCon, isPromotedTyCon,
isPromotedDataCon_maybe, isPromotedTyCon_maybe,
promotableTyCon_maybe, promoteTyCon,
- isInjectiveTyCon,
isDataTyCon, isProductTyCon, isDataProductTyCon_maybe,
isEnumerationTyCon,
isNewTyCon, isAbstractTyCon,
@@ -183,6 +183,9 @@ See also Note [Wrappers for data instance tycons] in MkId.lhs
It has an AlgTyConParent of
FamInstTyCon T [Int] ax_ti
+* The axiom ax_ti may be eta-reduced; see
+ Note [Eta reduction for data family axioms] in TcInstDcls
+
* The data contructor T2 has a wrapper (which is what the
source-level "T2" invokes):
@@ -576,11 +579,14 @@ data TyConParent
-- 3) A 'CoTyCon' identifying the representation
-- type with the type instance family
| FamInstTyCon -- See Note [Data type families]
- (CoAxiom Unbranched) -- The coercion constructor,
- -- always of kind T ty1 ty2 ~ R:T a b c
- -- where T is the family TyCon,
- -- and R:T is the representation TyCon (ie this one)
- -- and a,b,c are the tyConTyVars of this TyCon
+ (CoAxiom Unbranched) -- The coercion axiom.
+ -- Generally of kind T ty1 ty2 ~ R:T a b c
+ -- where T is the family TyCon,
+ -- and R:T is the representation TyCon (ie this one)
+ -- and a,b,c are the tyConTyVars of this TyCon
+ --
+ -- BUT may be eta-reduced; see TcInstDcls
+ -- Note [Eta reduction for data family axioms]
-- Cached fields of the CoAxiom, but adjusted to
-- use the tyConTyVars of this TyCon
@@ -722,7 +728,7 @@ which encodes as (TyConApp instCoercionTyCon [TyConApp CoT [], s])
Note [Newtype eta]
~~~~~~~~~~~~~~~~~~
Consider
- newtype Parser a = MkParser (IO a) derriving( Monad )
+ newtype Parser a = MkParser (IO a) deriving Monad
Are these two types equal (to Core)?
Monad Parser
Monad IO
@@ -1187,11 +1193,17 @@ isDataProductTyCon_maybe (TupleTyCon { dataCon = con })
= Just con
isDataProductTyCon_maybe _ = Nothing
--- | Is this a 'TyCon' representing a type synonym (@type@)?
+-- | Is this a 'TyCon' representing a regular H98 type synonym (@type@)?
+isTypeSynonymTyCon :: TyCon -> Bool
+isTypeSynonymTyCon (SynTyCon { synTcRhs = SynonymTyCon {} }) = True
+isTypeSynonymTyCon _ = False
+
+-- | Is this 'TyCon' a type synonym or type family?
isSynTyCon :: TyCon -> Bool
isSynTyCon (SynTyCon {}) = True
isSynTyCon _ = False
+
-- As for newtypes, it is in some contexts important to distinguish between
-- closed synonyms and synonym families, as synonym families have no unique
-- right hand side to which a synonym family application can expand.
@@ -1199,7 +1211,14 @@ isSynTyCon _ = False
isDecomposableTyCon :: TyCon -> Bool
-- True iff we can decompose (T a b c) into ((T a b) c)
+-- I.e. is it injective?
-- Specifically NOT true of synonyms (open and otherwise)
+-- Ultimately we may have injective associated types
+-- in which case this test will become more interesting
+--
+-- It'd be unusual to call isDecomposableTyCon on a regular H98
+-- type synonym, because you should probably have expanded it first
+-- But regardless, it's not decomposable
isDecomposableTyCon (SynTyCon {}) = False
isDecomposableTyCon _other = True
@@ -1259,17 +1278,6 @@ isDataFamilyTyCon :: TyCon -> Bool
isDataFamilyTyCon (AlgTyCon {algTcRhs = DataFamilyTyCon {}}) = True
isDataFamilyTyCon _ = False
--- | Injective 'TyCon's can be decomposed, so that
--- T ty1 ~ T ty2 => ty1 ~ ty2
-isInjectiveTyCon :: TyCon -> Bool
-isInjectiveTyCon tc = not (isSynTyCon tc)
- -- Ultimately we may have injective associated types
- -- in which case this test will become more interesting
- --
- -- It'd be unusual to call isInjectiveTyCon on a regular H98
- -- type synonym, because you should probably have expanded it first
- -- But regardless, it's not injective!
-
-- | Are we able to extract informationa 'TyVar' to class argument list
-- mappping from a given 'TyCon'?
isTyConAssoc :: TyCon -> Bool
@@ -1370,13 +1378,15 @@ isPromotedDataCon_maybe _ = Nothing
-- * Family instances are /not/ implicit as they represent the instance body
-- (similar to a @dfun@ does that for a class instance).
isImplicitTyCon :: TyCon -> Bool
-isImplicitTyCon tycon
- | isTyConAssoc tycon = True
- | isSynTyCon tycon = False
- | isAlgTyCon tycon = isTupleTyCon tycon
- | otherwise = True
- -- 'otherwise' catches: FunTyCon, PrimTyCon,
- -- PromotedDataCon, PomotedTypeTyCon
+isImplicitTyCon (FunTyCon {}) = True
+isImplicitTyCon (TupleTyCon {}) = True
+isImplicitTyCon (PrimTyCon {}) = True
+isImplicitTyCon (PromotedDataCon {}) = True
+isImplicitTyCon (PromotedTyCon {}) = True
+isImplicitTyCon (AlgTyCon { algTcParent = AssocFamilyTyCon {} }) = True
+isImplicitTyCon (AlgTyCon {}) = False
+isImplicitTyCon (SynTyCon { synTcParent = AssocFamilyTyCon {} }) = True
+isImplicitTyCon (SynTyCon {}) = False
tyConCType_maybe :: TyCon -> Maybe CType
tyConCType_maybe tc@(AlgTyCon {}) = tyConCType tc
diff --git a/compiler/types/Type.lhs b/compiler/types/Type.lhs
index 0abe463f189b..ad9e8b517c12 100644
--- a/compiler/types/Type.lhs
+++ b/compiler/types/Type.lhs
@@ -6,6 +6,7 @@
Type - public interface
\begin{code}
+{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-- | Main functions for manipulating types and type-related things
@@ -35,7 +36,7 @@ module Type (
mkForAllTy, mkForAllTys, splitForAllTy_maybe, splitForAllTys,
mkPiKinds, mkPiType, mkPiTypes,
- applyTy, applyTys, applyTysD, isForAllTy, dropForAlls,
+ applyTy, applyTys, applyTysD, dropForAlls,
mkNumLitTy, isNumLitTy,
mkStrLitTy, isStrLitTy,
@@ -50,7 +51,7 @@ module Type (
isDictLikeTy,
mkEqPred, mkCoerciblePred, mkPrimEqPred, mkReprPrimEqPred,
mkClassPred,
- noParenPred, isClassPred, isEqPred,
+ isClassPred, isEqPred,
isIPPred, isIPPred_maybe, isIPTyCon, isIPClass,
-- Deconstructing predicate types
@@ -62,7 +63,7 @@ module Type (
funTyCon,
-- ** Predicates on types
- isTypeVar, isKindVar,
+ isTypeVar, isKindVar, allDistinctTyVars, isForAllTy,
isTyVarTy, isFunTy, isDictTy, isPredTy, isVoidTy,
-- (Lifting and boxity)
@@ -128,9 +129,10 @@ module Type (
-- * Pretty-printing
pprType, pprParendType, pprTypeApp, pprTyThingCategory, pprTyThing,
- pprTvBndr, pprTvBndrs, pprForAll, pprSigmaType,
- pprEqPred, pprTheta, pprThetaArrowTy, pprClassPred,
+ pprTvBndr, pprTvBndrs, pprForAll, pprUserForAll, pprSigmaType,
+ pprTheta, pprThetaArrowTy, pprClassPred,
pprKind, pprParendKind, pprSourceTyCon,
+ TyPrec(..), maybeParen,
-- * Tidying type related things up for printing
tidyType, tidyTypes,
@@ -321,6 +323,15 @@ getTyVar_maybe ty | Just ty' <- coreView ty = getTyVar_maybe ty'
getTyVar_maybe (TyVarTy tv) = Just tv
getTyVar_maybe _ = Nothing
+allDistinctTyVars :: [KindOrType] -> Bool
+allDistinctTyVars tkvs = go emptyVarSet tkvs
+ where
+ go _ [] = True
+ go so_far (ty : tys)
+ = case getTyVar_maybe ty of
+ Nothing -> False
+ Just tv | tv `elemVarSet` so_far -> False
+ | otherwise -> go (so_far `extendVarSet` tv) tys
\end{code}
@@ -813,7 +824,7 @@ applyTysD doc orig_fun_ty arg_tys
= substTyWith (take n_args tvs) arg_tys
(mkForAllTys (drop n_args tvs) rho_ty)
| otherwise -- Too many type args
- = ASSERT2( n_tvs > 0, doc $$ ppr orig_fun_ty ) -- Zero case gives infnite loop!
+ = ASSERT2( n_tvs > 0, doc $$ ppr orig_fun_ty ) -- Zero case gives infinite loop!
applyTysD doc (substTyWith tvs (take n_tvs arg_tys) rho_ty)
(drop n_tvs arg_tys)
where
@@ -832,13 +843,6 @@ applyTysD doc orig_fun_ty arg_tys
Predicates on PredType
\begin{code}
-noParenPred :: PredType -> Bool
--- A predicate that can appear without parens before a "=>"
--- C a => a -> a
--- a~b => a -> b
--- But (?x::Int) => Int -> Int
-noParenPred p = not (isIPPred p) && isClassPred p || isEqPred p
-
isPredTy :: Type -> Bool
-- NB: isPredTy is used when printing types, which can happen in debug printing
-- during type checking of not-fully-zonked types. So it's not cool to say
@@ -1179,7 +1183,7 @@ seqTypes (ty:tys) = seqType ty `seq` seqTypes tys
%************************************************************************
%* *
- Comparision for types
+ Comparison for types
(We don't use instances so that we know where it happens)
%* *
%************************************************************************
@@ -1365,7 +1369,7 @@ emptyTvSubst :: TvSubst
emptyTvSubst = TvSubst emptyInScopeSet emptyTvSubstEnv
isEmptyTvSubst :: TvSubst -> Bool
- -- See Note [Extending the TvSubstEnv]
+ -- See Note [Extending the TvSubstEnv] in TypeRep
isEmptyTvSubst (TvSubst _ tenv) = isEmptyVarEnv tenv
mkTvSubst :: InScopeSet -> TvSubstEnv -> TvSubst
@@ -1559,7 +1563,7 @@ subst_ty subst ty
substTyVar :: TvSubst -> TyVar -> Type
substTyVar (TvSubst _ tenv) tv
| Just ty <- lookupVarEnv tenv tv = ty -- See Note [Apply Once]
- | otherwise = ASSERT( isTyVar tv ) TyVarTy tv
+ | otherwise = ASSERT( isTyVar tv ) TyVarTy tv -- in TypeRep
-- We do not require that the tyvar is in scope
-- Reason: we do quite a bit of (substTyWith [tv] [ty] tau)
-- and it's a nuisance to bring all the free vars of tau into
@@ -1570,7 +1574,7 @@ substTyVars :: TvSubst -> [TyVar] -> [Type]
substTyVars subst tvs = map (substTyVar subst) tvs
lookupTyVar :: TvSubst -> TyVar -> Maybe Type
- -- See Note [Extending the TvSubst]
+ -- See Note [Extending the TvSubst] in TypeRep
lookupTyVar (TvSubst _ tenv) tv = lookupVarEnv tenv tv
substTyVarBndr :: TvSubst -> TyVar -> (TvSubst, TyVar)
@@ -1589,7 +1593,7 @@ substTyVarBndr subst@(TvSubst in_scope tenv) old_var
no_change = no_kind_change && (new_var == old_var)
-- no_change means that the new_var is identical in
-- all respects to the old_var (same unique, same kind)
- -- See Note [Extending the TvSubst]
+ -- See Note [Extending the TvSubst] in TypeRep
--
-- In that case we don't need to extend the substitution
-- to map old to new. But instead we must zap any
@@ -1635,26 +1639,31 @@ type SimpleKind = Kind
\begin{code}
typeKind :: Type -> Kind
-typeKind (TyConApp tc tys)
- | isPromotedTyCon tc
- = ASSERT( tyConArity tc == length tys ) superKind
- | otherwise
- = kindAppResult (tyConKind tc) tys
-
-typeKind (AppTy fun arg) = kindAppResult (typeKind fun) [arg]
-typeKind (LitTy l) = typeLiteralKind l
-typeKind (ForAllTy _ ty) = typeKind ty
-typeKind (TyVarTy tyvar) = tyVarKind tyvar
-typeKind _ty@(FunTy _arg res)
- -- Hack alert. The kind of (Int -> Int#) is liftedTypeKind (*),
- -- not unliftedTypKind (#)
- -- The only things that can be after a function arrow are
- -- (a) types (of kind openTypeKind or its sub-kinds)
- -- (b) kinds (of super-kind TY) (e.g. * -> (* -> *))
- | isSuperKind k = k
- | otherwise = ASSERT2( isSubOpenTypeKind k, ppr _ty $$ ppr k ) liftedTypeKind
- where
- k = typeKind res
+typeKind orig_ty = go orig_ty
+ where
+
+ go ty@(TyConApp tc tys)
+ | isPromotedTyCon tc
+ = ASSERT( tyConArity tc == length tys ) superKind
+ | otherwise
+ = kindAppResult (ptext (sLit "typeKind 1") <+> ppr ty $$ ppr orig_ty)
+ (tyConKind tc) tys
+
+ go ty@(AppTy fun arg) = kindAppResult (ptext (sLit "typeKind 2") <+> ppr ty $$ ppr orig_ty)
+ (go fun) [arg]
+ go (LitTy l) = typeLiteralKind l
+ go (ForAllTy _ ty) = go ty
+ go (TyVarTy tyvar) = tyVarKind tyvar
+ go _ty@(FunTy _arg res)
+ -- Hack alert. The kind of (Int -> Int#) is liftedTypeKind (*),
+ -- not unliftedTypeKind (#)
+ -- The only things that can be after a function arrow are
+ -- (a) types (of kind openTypeKind or its sub-kinds)
+ -- (b) kinds (of super-kind TY) (e.g. * -> (* -> *))
+ | isSuperKind k = k
+ | otherwise = ASSERT2( isSubOpenTypeKind k, ppr _ty $$ ppr k ) liftedTypeKind
+ where
+ k = go res
typeLiteralKind :: TyLit -> Kind
typeLiteralKind l =
diff --git a/compiler/types/Type.lhs-boot b/compiler/types/Type.lhs-boot
index c2d2dec09380..ff9db3e28c37 100644
--- a/compiler/types/Type.lhs-boot
+++ b/compiler/types/Type.lhs-boot
@@ -3,7 +3,6 @@ module Type where
import {-# SOURCE #-} TypeRep( Type, Kind )
import Var
-noParenPred :: Type -> Bool
isPredTy :: Type -> Bool
typeKind :: Type -> Kind
diff --git a/compiler/types/TypeRep.lhs b/compiler/types/TypeRep.lhs
index 906989a718e2..c8b20e8d9335 100644
--- a/compiler/types/TypeRep.lhs
+++ b/compiler/types/TypeRep.lhs
@@ -15,16 +15,16 @@ Note [The Type-related module hierarchy]
Coercion imports Type
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP, DeriveDataTypeable, DeriveFunctor, DeriveFoldable, DeriveTraversable #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
-- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
-- for details
-
--- We expose the relevant stuff from this module via the Type module
{-# OPTIONS_HADDOCK hide #-}
-{-# LANGUAGE DeriveDataTypeable, DeriveFunctor, DeriveFoldable, DeriveTraversable #-}
+-- We expose the relevant stuff from this module via the Type module
+
module TypeRep (
TyThing(..),
Type(..),
@@ -39,9 +39,10 @@ module TypeRep (
-- Pretty-printing
pprType, pprParendType, pprTypeApp, pprTvBndr, pprTvBndrs,
pprTyThing, pprTyThingCategory, pprSigmaType,
- pprEqPred, pprTheta, pprForAll, pprThetaArrowTy, pprClassPred,
+ pprTheta, pprForAll, pprUserForAll,
+ pprThetaArrowTy, pprClassPred,
pprKind, pprParendKind, pprTyLit, suppressKinds,
- Prec(..), maybeParen, pprTcApp,
+ TyPrec(..), maybeParen, pprTcApp,
pprPrefixApp, pprArrowChain, ppr_type,
-- Free variables
@@ -65,7 +66,7 @@ module TypeRep (
import {-# SOURCE #-} DataCon( dataConTyCon )
import ConLike ( ConLike(..) )
-import {-# SOURCE #-} Type( noParenPred, isPredTy ) -- Transitively pulls in a LOT of stuff, better to break the loop
+import {-# SOURCE #-} Type( isPredTy ) -- Transitively pulls in a LOT of stuff, better to break the loop
-- friends:
import Var
@@ -81,7 +82,6 @@ import CoAxiom
import PrelNames
import Outputable
import FastString
-import Pair
import Util
import DynFlags
@@ -415,7 +415,7 @@ instance NamedThing TyThing where -- Can't put this with the type
-- 2. In particular, the /kind/ of the type variables in
-- the in-scope set is not relevant
--
--- 3. The substition is only applied ONCE! This is because
+-- 3. The substitution is only applied ONCE! This is because
-- in general such application will not reached a fixed point.
data TvSubst
= TvSubst InScopeSet -- The in-scope type and kind variables
@@ -423,7 +423,7 @@ data TvSubst
-- See Note [Apply Once]
-- and Note [Extending the TvSubstEnv]
--- | A substitition of 'Type's for 'TyVar's
+-- | A substitution of 'Type's for 'TyVar's
-- and 'Kind's for 'KindVar's
type TvSubstEnv = TyVarEnv Type
-- A TvSubstEnv is used both inside a TvSubst (with the apply-once
@@ -439,10 +439,10 @@ We use TvSubsts to instantiate things, and we might instantiate
forall a b. ty
\with the types
[a, b], or [b, a].
-So the substition might go [a->b, b->a]. A similar situation arises in Core
+So the substitution might go [a->b, b->a]. A similar situation arises in Core
when we find a beta redex like
(/\ a /\ b -> e) b a
-Then we also end up with a substition that permutes type variables. Other
+Then we also end up with a substitution that permutes type variables. Other
variations happen to; for example [a -> (a, b)].
***************************************************
@@ -491,13 +491,31 @@ defined to use this. @pprParendType@ is the same, except it puts
parens around the type, except for the atomic cases. @pprParendType@
works just by setting the initial context precedence very high.
+Note [Precedence in types]
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+We don't keep the fixity of type operators in the operator. So the pretty printer
+operates the following precedence structure:
+ Type constructor application binds more tightly than
+ Operator applications which bind more tightly than
+ Function arrow
+
+So we might see a :+: T b -> c
+meaning (a :+: (T b)) -> c
+
+Maybe operator applications should bind a bit less tightly?
+
+Anyway, that's the current story, and it is used consistently for Type and HsType
+
\begin{code}
-data Prec = TopPrec -- No parens
- | FunPrec -- Function args; no parens for tycon apps
- | TyConPrec -- Tycon args; no parens for atomic
- deriving( Eq, Ord )
+data TyPrec -- See Note [Precedence in types]
+
+ = TopPrec -- No parens
+ | FunPrec -- Function args; no parens for tycon apps
+ | TyOpPrec -- Infix operator
+ | TyConPrec -- Tycon args; no parens for atomic
+ deriving( Eq, Ord )
-maybeParen :: Prec -> Prec -> SDoc -> SDoc
+maybeParen :: TyPrec -> TyPrec -> SDoc -> SDoc
maybeParen ctxt_prec inner_prec pretty
| ctxt_prec < inner_prec = pretty
| otherwise = parens pretty
@@ -514,18 +532,6 @@ pprKind, pprParendKind :: Kind -> SDoc
pprKind = pprType
pprParendKind = pprParendType
-------------------
-pprEqPred :: Pair Type -> SDoc
--- NB: Maybe move to Coercion? It's only called after coercionKind anyway.
-pprEqPred (Pair ty1 ty2)
- = sep [ ppr_type FunPrec ty1
- , nest 2 (ptext (sLit "~#"))
- , ppr_type FunPrec ty2]
- -- Precedence looks like (->) so that we get
- -- Maybe a ~ Bool
- -- (a->a) ~ Bool
- -- Note parens on the latter!
-
------------
pprClassPred :: Class -> [Type] -> SDoc
pprClassPred clas tys = pprTypeApp (classTyCon clas) tys
@@ -536,10 +542,9 @@ pprTheta :: ThetaType -> SDoc
pprTheta theta = parens (sep (punctuate comma (map (ppr_type TopPrec) theta)))
pprThetaArrowTy :: ThetaType -> SDoc
-pprThetaArrowTy [] = empty
-pprThetaArrowTy [pred]
- | noParenPred pred = ppr_type TopPrec pred <+> darrow
-pprThetaArrowTy preds = parens (fsep (punctuate comma (map (ppr_type TopPrec) preds)))
+pprThetaArrowTy [] = empty
+pprThetaArrowTy [pred] = ppr_type FunPrec pred <+> darrow
+pprThetaArrowTy preds = parens (fsep (punctuate comma (map (ppr_type TopPrec) preds)))
<+> darrow
-- Notice 'fsep' here rather that 'sep', so that
-- type contexts don't get displayed in a giant column
@@ -573,15 +578,9 @@ instance Outputable TyLit where
------------------
-- OK, here's the main printer
-ppr_type :: Prec -> Type -> SDoc
+ppr_type :: TyPrec -> Type -> SDoc
ppr_type _ (TyVarTy tv) = ppr_tvar tv
-
-ppr_type _ (TyConApp tc [LitTy (StrTyLit n),ty])
- | tc `hasKey` ipClassNameKey
- = char '?' <> ftext n <> ptext (sLit "::") <> ppr_type TopPrec ty
-
ppr_type p (TyConApp tc tys) = pprTyTcApp p tc tys
-
ppr_type p (LitTy l) = ppr_tylit p l
ppr_type p ty@(ForAllTy {}) = ppr_forall_type p ty
@@ -600,15 +599,17 @@ ppr_type p fun_ty@(FunTy ty1 ty2)
ppr_fun_tail other_ty = [ppr_type TopPrec other_ty]
-ppr_forall_type :: Prec -> Type -> SDoc
+ppr_forall_type :: TyPrec -> Type -> SDoc
ppr_forall_type p ty
= maybeParen p FunPrec $ ppr_sigma_type True ty
+ -- True <=> we always print the foralls on *nested* quantifiers
+ -- Opt_PrintExplicitForalls only affects top-level quantifiers
ppr_tvar :: TyVar -> SDoc
ppr_tvar tv -- Note [Infix type variables]
= parenSymOcc (getOccName tv) (ppr tv)
-ppr_tylit :: Prec -> TyLit -> SDoc
+ppr_tylit :: TyPrec -> TyLit -> SDoc
ppr_tylit _ tl =
case tl of
NumTyLit n -> integer n
@@ -616,34 +617,38 @@ ppr_tylit _ tl =
-------------------
ppr_sigma_type :: Bool -> Type -> SDoc
--- Bool <=> Show the foralls
-ppr_sigma_type show_foralls ty
- = sdocWithDynFlags $ \ dflags ->
- let filtered_tvs | gopt Opt_PrintExplicitKinds dflags
- = tvs
- | otherwise
- = filterOut isKindVar tvs
- in sep [ ppWhen show_foralls (pprForAll filtered_tvs)
- , pprThetaArrowTy ctxt
- , pprType tau ]
+-- Bool <=> Show the foralls unconditionally
+ppr_sigma_type show_foralls_unconditionally ty
+ = sep [ if show_foralls_unconditionally
+ then pprForAll tvs
+ else pprUserForAll tvs
+ , pprThetaArrowTy ctxt
+ , pprType tau ]
where
(tvs, rho) = split1 [] ty
(ctxt, tau) = split2 [] rho
split1 tvs (ForAllTy tv ty) = split1 (tv:tvs) ty
- split1 tvs ty = (reverse tvs, ty)
-
+ split1 tvs ty = (reverse tvs, ty)
+
split2 ps (ty1 `FunTy` ty2) | isPredTy ty1 = split2 (ty1:ps) ty2
split2 ps ty = (reverse ps, ty)
-
pprSigmaType :: Type -> SDoc
-pprSigmaType ty = sdocWithDynFlags $ \dflags ->
- ppr_sigma_type (gopt Opt_PrintExplicitForalls dflags) ty
+pprSigmaType ty = ppr_sigma_type False ty
+
+pprUserForAll :: [TyVar] -> SDoc
+-- Print a user-level forall; see Note [When to print foralls]
+pprUserForAll tvs
+ = sdocWithDynFlags $ \dflags ->
+ ppWhen (any tv_has_kind_var tvs || gopt Opt_PrintExplicitForalls dflags) $
+ pprForAll tvs
+ where
+ tv_has_kind_var tv = not (isEmptyVarSet (tyVarsOfType (tyVarKind tv)))
pprForAll :: [TyVar] -> SDoc
pprForAll [] = empty
-pprForAll tvs = ptext (sLit "forall") <+> pprTvBndrs tvs <> dot
+pprForAll tvs = forAllLit <+> pprTvBndrs tvs <> dot
pprTvBndrs :: [TyVar] -> SDoc
pprTvBndrs tvs = sep (map pprTvBndr tvs)
@@ -656,6 +661,24 @@ pprTvBndr tv
kind = tyVarKind tv
\end{code}
+Note [When to print foralls]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Mostly we want to print top-level foralls when (and only when) the user specifies
+-fprint-explicit-foralls. But when kind polymorphism is at work, that suppresses
+too much information; see Trac #9018.
+
+So I'm trying out this rule: print explicit foralls if
+ a) User specifies -fprint-explicit-foralls, or
+ b) Any of the quantified type variables has a kind
+ that mentions a kind variable
+
+This catches common situations, such as a type signature
+ f :: m a
+which means
+ f :: forall k. forall (m :: k->*) (a :: k). m a
+We really want to see both the "forall k" and the kind signatures
+on m and a. The latter comes from pprTvBndr.
+
Note [Infix type variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
With TypeOperators you can say
@@ -680,10 +703,15 @@ pprTypeApp tc tys = pprTyTcApp TopPrec tc tys
-- We have to use ppr on the TyCon (not its name)
-- so that we get promotion quotes in the right place
-pprTyTcApp :: Prec -> TyCon -> [Type] -> SDoc
+pprTyTcApp :: TyPrec -> TyCon -> [Type] -> SDoc
-- Used for types only; so that we can make a
-- special case for type-level lists
pprTyTcApp p tc tys
+ | tc `hasKey` ipClassNameKey
+ , [LitTy (StrTyLit n),ty] <- tys
+ = maybeParen p FunPrec $
+ char '?' <> ftext n <> ptext (sLit "::") <> ppr_type TopPrec ty
+
| tc `hasKey` consDataConKey
, [_kind,ty1,ty2] <- tys
= sdocWithDynFlags $ \dflags ->
@@ -693,7 +721,7 @@ pprTyTcApp p tc tys
| otherwise
= pprTcApp p ppr_type tc tys
-pprTcApp :: Prec -> (Prec -> a -> SDoc) -> TyCon -> [a] -> SDoc
+pprTcApp :: TyPrec -> (TyPrec -> a -> SDoc) -> TyCon -> [a] -> SDoc
-- Used for both types and coercions, hence polymorphism
pprTcApp _ pp tc [ty]
| tc `hasKey` listTyConKey = pprPromotionQuote tc <> brackets (pp TopPrec ty)
@@ -717,7 +745,7 @@ pprTcApp p pp tc tys
| otherwise
= sdocWithDynFlags (pprTcApp_help p pp tc tys)
-pprTcApp_help :: Prec -> (Prec -> a -> SDoc) -> TyCon -> [a] -> DynFlags -> SDoc
+pprTcApp_help :: TyPrec -> (TyPrec -> a -> SDoc) -> TyCon -> [a] -> DynFlags -> SDoc
-- This one has accss to the DynFlags
pprTcApp_help p pp tc tys dflags
| not (isSymOcc (nameOccName (tyConName tc)))
@@ -740,6 +768,7 @@ pprTcApp_help p pp tc tys dflags
suppressKinds :: DynFlags -> Kind -> [a] -> [a]
-- Given the kind of a TyCon, and the args to which it is applied,
-- suppress the args that are kind args
+-- C.f. Note [Suppressing kinds] in IfaceType
suppressKinds dflags kind xs
| gopt Opt_PrintExplicitKinds dflags = xs
| otherwise = suppress kind xs
@@ -749,7 +778,7 @@ suppressKinds dflags kind xs
suppress _ xs = xs
----------------
-pprTyList :: Prec -> Type -> Type -> SDoc
+pprTyList :: TyPrec -> Type -> Type -> SDoc
-- Given a type-level list (t1 ': t2), see if we can print
-- it in list notation [t1, ...].
pprTyList p ty1 ty2
@@ -773,19 +802,19 @@ pprTyList p ty1 ty2
gather ty = ([], Just ty)
----------------
-pprInfixApp :: Prec -> (Prec -> a -> SDoc) -> SDoc -> a -> a -> SDoc
+pprInfixApp :: TyPrec -> (TyPrec -> a -> SDoc) -> SDoc -> a -> a -> SDoc
pprInfixApp p pp pp_tc ty1 ty2
- = maybeParen p FunPrec $
- sep [pp FunPrec ty1, pprInfixVar True pp_tc <+> pp FunPrec ty2]
+ = maybeParen p TyOpPrec $
+ sep [pp TyOpPrec ty1, pprInfixVar True pp_tc <+> pp TyOpPrec ty2]
-pprPrefixApp :: Prec -> SDoc -> [SDoc] -> SDoc
+pprPrefixApp :: TyPrec -> SDoc -> [SDoc] -> SDoc
pprPrefixApp p pp_fun pp_tys
| null pp_tys = pp_fun
| otherwise = maybeParen p TyConPrec $
hang pp_fun 2 (sep pp_tys)
----------------
-pprArrowChain :: Prec -> [SDoc] -> SDoc
+pprArrowChain :: TyPrec -> [SDoc] -> SDoc
-- pprArrowChain p [a,b,c] generates a -> b -> c
pprArrowChain _ [] = empty
pprArrowChain p (arg:args) = maybeParen p FunPrec $
diff --git a/compiler/types/Unify.lhs b/compiler/types/Unify.lhs
index 8d1beb6b1dea..1eb1c2b87210 100644
--- a/compiler/types/Unify.lhs
+++ b/compiler/types/Unify.lhs
@@ -3,7 +3,8 @@
%
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
+{-# LANGUAGE CPP #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
@@ -22,8 +23,7 @@ module Unify (
typesCantMatch,
-- Side-effect free unification
- tcUnifyTys, BindFlag(..),
- niFixTvSubst, niSubstTvSet,
+ tcUnifyTy, tcUnifyTys, BindFlag(..),
UnifyResultM(..), UnifyResult, tcUnifyTysFG
@@ -205,6 +205,8 @@ match _ subst (LitTy x) (LitTy y) | x == y = return subst
match _ _ _ _
= Nothing
+
+
--------------
match_kind :: MatchEnv -> TvSubstEnv -> Kind -> Kind -> Maybe TvSubstEnv
-- Match the kind of the template tyvar with the kind of Type
@@ -416,18 +418,45 @@ substituted, we can't properly unify the types. But, that skolem variable
may later be instantiated with a unifyable type. So, we return maybeApart
in these cases.
+Note [Lists of different lengths are MaybeApart]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+It is unusual to call tcUnifyTys or tcUnifyTysFG with lists of different
+lengths. The place where we know this can happen is from compatibleBranches in
+FamInstEnv, when checking data family instances. Data family instances may be
+eta-reduced; see Note [Eta reduction for data family axioms] in TcInstDcls.
+
+We wish to say that
+
+ D :: * -> * -> *
+ axDF1 :: D Int ~ DFInst1
+ axDF2 :: D Int Bool ~ DFInst2
+
+overlap. If we conclude that lists of different lengths are SurelyApart, then
+it will look like these do *not* overlap, causing disaster. See Trac #9371.
+
+In usages of tcUnifyTys outside of family instances, we always use tcUnifyTys,
+which can't tell the difference between MaybeApart and SurelyApart, so those
+usages won't notice this design choice.
+
\begin{code}
+tcUnifyTy :: Type -> Type -- All tyvars are bindable
+ -> Maybe TvSubst -- A regular one-shot (idempotent) substitution
+-- Simple unification of two types; all type variables are bindable
+tcUnifyTy ty1 ty2
+ = case initUM (const BindMe) (unify emptyTvSubstEnv ty1 ty2) of
+ Unifiable subst_env -> Just (niFixTvSubst subst_env)
+ _other -> Nothing
+
+-----------------
tcUnifyTys :: (TyVar -> BindFlag)
-> [Type] -> [Type]
-> Maybe TvSubst -- A regular one-shot (idempotent) substitution
-- The two types may have common type variables, and indeed do so in the
-- second call to tcUnifyTys in FunDeps.checkClsFD
---
tcUnifyTys bind_fn tys1 tys2
- | Unifiable subst <- tcUnifyTysFG bind_fn tys1 tys2
- = Just subst
- | otherwise
- = Nothing
+ = case tcUnifyTysFG bind_fn tys1 tys2 of
+ Unifiable subst -> Just subst
+ _ -> Nothing
-- This type does double-duty. It is used in the UM (unifier monad) and to
-- return the final result. See Note [Fine-grained unification]
@@ -463,19 +492,52 @@ During unification we use a TvSubstEnv that is
(a) non-idempotent
(b) loop-free; ie repeatedly applying it yields a fixed point
+Note [Finding the substitution fixpoint]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Finding the fixpoint of a non-idempotent substitution arising from a
+unification is harder than it looks, because of kinds. Consider
+ T k (H k (f:k)) ~ T * (g:*)
+If we unify, we get the substitution
+ [ k -> *
+ , g -> H k (f:k) ]
+To make it idempotent we don't want to get just
+ [ k -> *
+ , g -> H * (f:k) ]
+We also want to substitute inside f's kind, to get
+ [ k -> *
+ , g -> H * (f:*) ]
+If we don't do this, we may apply the substitution to something,
+and get an ill-formed type, i.e. one where typeKind will fail.
+This happened, for example, in Trac #9106.
+
+This is the reason for extending env with [f:k -> f:*], in the
+definition of env' in niFixTvSubst
+
\begin{code}
niFixTvSubst :: TvSubstEnv -> TvSubst
-- Find the idempotent fixed point of the non-idempotent substitution
+-- See Note [Finding the substitution fixpoint]
-- ToDo: use laziness instead of iteration?
niFixTvSubst env = f env
where
- f e | not_fixpoint = f (mapVarEnv (substTy subst) e)
- | otherwise = subst
+ f env | not_fixpoint = f (mapVarEnv (substTy subst') env)
+ | otherwise = subst
where
- range_tvs = foldVarEnv (unionVarSet . tyVarsOfType) emptyVarSet e
- subst = mkTvSubst (mkInScopeSet range_tvs) e
- not_fixpoint = foldVarSet ((||) . in_domain) False range_tvs
- in_domain tv = tv `elemVarEnv` e
+ not_fixpoint = foldVarSet ((||) . in_domain) False all_range_tvs
+ in_domain tv = tv `elemVarEnv` env
+
+ range_tvs = foldVarEnv (unionVarSet . tyVarsOfType) emptyVarSet env
+ all_range_tvs = closeOverKinds range_tvs
+ subst = mkTvSubst (mkInScopeSet all_range_tvs) env
+
+ -- env' extends env by replacing any free tyvar in the range
+ -- with that same tyvar carrying a substituted kind
+ -- See Note [Finding the substitution fixpoint]
+ env' = extendVarEnvList env [ (rtv, mkTyVarTy $ setTyVarKind rtv $
+ substTy subst $ tyVarKind rtv)
+ | rtv <- varSetElems range_tvs
+ , not (in_domain rtv) ]
+ subst' = mkTvSubst (mkInScopeSet all_range_tvs) env'
niSubstTvSet :: TvSubstEnv -> TyVarSet -> TyVarSet
-- Apply the non-idempotent substitution to a set of type variables,
@@ -551,7 +613,7 @@ unifyList subst orig_xs orig_ys
go subst [] [] = return subst
go subst (x:xs) (y:ys) = do { subst' <- unify subst x y
; go subst' xs ys }
- go _ _ _ = surelyApart
+ go subst _ _ = maybeApart subst -- See Note [Lists of different lengths are MaybeApart]
---------------------------------
uVar :: TvSubstEnv -- An existing substitution to extend
@@ -613,6 +675,7 @@ uUnrefined subst tv1 ty2 ty2' -- ty2 is not a type variable
-- See Note [Fine-grained unification]
| otherwise
= do { subst' <- unify subst k1 k2
+ -- Note [Kinds Containing Only Literals]
; bindTv subst' tv1 ty2 } -- Bind tyvar to the synonym if poss
where
k1 = tyVarKind tv1
@@ -670,9 +733,9 @@ instance Monad UM where
other -> other
SurelyApart -> SurelyApart)
-initUM :: (TyVar -> BindFlag) -> UM TvSubst -> UnifyResult
+initUM :: (TyVar -> BindFlag) -> UM a -> UnifyResultM a
initUM badtvs um = unUM um badtvs
-
+
tvBindFlag :: TyVar -> UM BindFlag
tvBindFlag tv = UM (\tv_fn -> Unifiable (tv_fn tv))
diff --git a/compiler/utils/Bag.lhs b/compiler/utils/Bag.lhs
index 2d823e46bbe0..65c5b39df17d 100644
--- a/compiler/utils/Bag.lhs
+++ b/compiler/utils/Bag.lhs
@@ -6,6 +6,8 @@
Bag: an unordered collection with duplicates
\begin{code}
+{-# LANGUAGE DeriveDataTypeable, ScopedTypeVariables #-}
+
module Bag (
Bag, -- abstract type
diff --git a/compiler/utils/Binary.hs b/compiler/utils/Binary.hs
index 332bfc8e0cc6..bd194b0fde47 100644
--- a/compiler/utils/Binary.hs
+++ b/compiler/utils/Binary.hs
@@ -1,4 +1,4 @@
-{-# OPTIONS -cpp #-}
+{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-}
{-# OPTIONS_GHC -O -funbox-strict-fields #-}
-- We always optimise this, otherwise performance of a non-optimised
-- compiler is severely affected
@@ -27,9 +27,11 @@ module Binary
seekBy,
tellBin,
castBin,
+ diffBin,
writeBinMem,
readBinMem,
+ getBinMemBuf,
fingerprintBinMem,
computeFingerprint,
@@ -124,6 +126,9 @@ newtype Bin a = BinPtr Int
castBin :: Bin a -> Bin b
castBin (BinPtr i) = BinPtr i
+diffBin :: Bin a -> Bin a -> Int
+diffBin (BinPtr i) (BinPtr j) = i - j
+
---------------------------------------------------------------
-- class Binary
---------------------------------------------------------------
@@ -208,6 +213,12 @@ readBinMem filename = do
writeFastMutInt sz_r filesize
return (BinMem noUserData ix_r sz_r arr_r)
+getBinMemBuf :: BinHandle -> IO (Int, ForeignPtr Word8)
+getBinMemBuf (BinMem _ ix_r _ arr_r) = do
+ arr <- readIORef arr_r
+ ix <- readFastMutInt ix_r
+ return (ix, arr)
+
fingerprintBinMem :: BinHandle -> IO Fingerprint
fingerprintBinMem (BinMem _ ix_r _ arr_r) = do
arr <- readIORef arr_r
@@ -707,14 +718,13 @@ getBS bh = do
l <- get bh
fp <- mallocForeignPtrBytes l
withForeignPtr fp $ \ptr -> do
- let
- go n | n == l = return $ BS.fromForeignPtr fp 0 l
+ let go n | n == l = return $ BS.fromForeignPtr fp 0 l
| otherwise = do
b <- getByte bh
pokeElemOff ptr n b
go (n+1)
- --
- go 0
+ --
+ go 0
instance Binary ByteString where
put_ bh f = putBS bh f
@@ -834,18 +844,30 @@ instance Binary RecFlag where
0 -> do return Recursive
_ -> do return NonRecursive
-instance Binary OverlapFlag where
- put_ bh (NoOverlap b) = putByte bh 0 >> put_ bh b
- put_ bh (OverlapOk b) = putByte bh 1 >> put_ bh b
- put_ bh (Incoherent b) = putByte bh 2 >> put_ bh b
+instance Binary OverlapMode where
+ put_ bh NoOverlap = putByte bh 0
+ put_ bh Overlaps = putByte bh 1
+ put_ bh Incoherent = putByte bh 2
+ put_ bh Overlapping = putByte bh 3
+ put_ bh Overlappable = putByte bh 4
get bh = do
h <- getByte bh
- b <- get bh
case h of
- 0 -> return $ NoOverlap b
- 1 -> return $ OverlapOk b
- 2 -> return $ Incoherent b
- _ -> panic ("get OverlapFlag " ++ show h)
+ 0 -> return NoOverlap
+ 1 -> return Overlaps
+ 2 -> return Incoherent
+ 3 -> return Overlapping
+ 4 -> return Overlappable
+ _ -> panic ("get OverlapMode" ++ show h)
+
+
+instance Binary OverlapFlag where
+ put_ bh flag = do put_ bh (overlapMode flag)
+ put_ bh (isSafeOverlap flag)
+ get bh = do
+ h <- get bh
+ b <- get bh
+ return OverlapFlag { overlapMode = h, isSafeOverlap = b }
instance Binary FixityDirection where
put_ bh InfixL = do
diff --git a/compiler/utils/BufWrite.hs b/compiler/utils/BufWrite.hs
index f85ea8e792e0..7eba0753fe62 100644
--- a/compiler/utils/BufWrite.hs
+++ b/compiler/utils/BufWrite.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-----------------------------------------------------------------------------
--
-- Fast write-buffered Handles
@@ -10,7 +12,7 @@
--
-----------------------------------------------------------------------------
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
diff --git a/compiler/utils/Digraph.lhs b/compiler/utils/Digraph.lhs
index cc684303b63d..35782bac6e49 100644
--- a/compiler/utils/Digraph.lhs
+++ b/compiler/utils/Digraph.lhs
@@ -3,28 +3,21 @@
%
\begin{code}
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
-{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE CPP, ScopedTypeVariables #-}
module Digraph(
Graph, graphFromVerticesAndAdjacency, graphFromEdgedVertices,
SCC(..), Node, flattenSCC, flattenSCCs,
- stronglyConnCompG, stronglyConnCompFromG,
+ stronglyConnCompG,
topologicalSortG, dfsTopSortG,
verticesG, edgesG, hasVertexG,
- reachableG, transposeG,
+ reachableG, reachablesG, transposeG,
outdegreeG, indegreeG,
vertexGroupsG, emptyG,
componentsG,
findCycle,
-
+
-- For backwards compatability with the simpler version of Digraph
stronglyConnCompFromEdgedVertices, stronglyConnCompFromEdgedVerticesR,
@@ -77,14 +70,14 @@ Note [Nodes, keys, vertices]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* A 'node' is a big blob of client-stuff
- * Each 'node' has a unique (client) 'key', but the latter
- is in Ord and has fast comparison
+ * Each 'node' has a unique (client) 'key', but the latter
+ is in Ord and has fast comparison
* Digraph then maps each 'key' to a Vertex (Int) which is
- arranged densely in 0.n
+ arranged densely in 0.n
\begin{code}
-data Graph node = Graph {
+data Graph node = Graph {
gr_int_graph :: IntGraph,
gr_vertex_to_node :: Vertex -> node,
gr_node_to_vertex :: node -> Maybe Vertex
@@ -92,12 +85,12 @@ data Graph node = Graph {
data Edge node = Edge node node
-type Node key payload = (payload, key, [key])
+type Node key payload = (payload, key, [key])
-- The payload is user data, just carried around in this module
-- The keys are ordered
- -- The [key] are the dependencies of the node;
+ -- The [key] are the dependencies of the node;
-- it's ok to have extra keys in the dependencies that
- -- are not the key of any Node in the graph
+ -- are not the key of any Node in the graph
emptyGraph :: Graph a
emptyGraph = Graph (array (1, 0) []) (error "emptyGraph") (const Nothing)
@@ -105,7 +98,7 @@ emptyGraph = Graph (array (1, 0) []) (error "emptyGraph") (const Nothing)
graphFromVerticesAndAdjacency
:: Ord key
=> [(node, key)]
- -> [(key, key)] -- First component is source vertex key,
+ -> [(key, key)] -- First component is source vertex key,
-- second is target vertex key (thing depended on)
-- Unlike the other interface I insist they correspond to
-- actual vertices because the alternative hides bugs. I can't
@@ -115,7 +108,7 @@ graphFromVerticesAndAdjacency [] _ = emptyGraph
graphFromVerticesAndAdjacency vertices edges = Graph graph vertex_node (key_vertex . key_extractor)
where key_extractor = snd
(bounds, vertex_node, key_vertex, _) = reduceNodesIntoVertices vertices key_extractor
- key_vertex_pair (a, b) = (expectJust "graphFromVerticesAndAdjacency" $ key_vertex a,
+ key_vertex_pair (a, b) = (expectJust "graphFromVerticesAndAdjacency" $ key_vertex a,
expectJust "graphFromVerticesAndAdjacency" $ key_vertex b)
reduced_edges = map key_vertex_pair edges
graph = buildG bounds reduced_edges
@@ -132,10 +125,10 @@ graphFromEdgedVertices edged_vertices = Graph graph vertex_fn (key_vertex . key_
(bounds, vertex_fn, key_vertex, numbered_nodes) = reduceNodesIntoVertices edged_vertices key_extractor
graph = array bounds [(v, mapMaybe key_vertex ks) | (v, (_, _, ks)) <- numbered_nodes]
-reduceNodesIntoVertices
- :: Ord key
- => [node]
- -> (node -> key)
+reduceNodesIntoVertices
+ :: Ord key
+ => [node]
+ -> (node -> key)
-> (Bounds, Vertex -> node, key -> Maybe Vertex, [(Int, node)])
reduceNodesIntoVertices nodes key_extractor = (bounds, (!) vertex_map, key_vertex, numbered_nodes)
where
@@ -168,18 +161,18 @@ reduceNodesIntoVertices nodes key_extractor = (bounds, (!) vertex_map, key_verte
\begin{code}
type WorkItem key payload
- = (Node key payload, -- Tip of the path
- [payload]) -- Rest of the path;
- -- [a,b,c] means c depends on b, b depends on a
+ = (Node key payload, -- Tip of the path
+ [payload]) -- Rest of the path;
+ -- [a,b,c] means c depends on b, b depends on a
-- | Find a reasonably short cycle a->b->c->a, in a strongly
-- connected component. The input nodes are presumed to be
-- a SCC, so you can start anywhere.
-findCycle :: forall payload key. Ord key
+findCycle :: forall payload key. Ord key
=> [Node key payload] -- The nodes. The dependencies can
- -- contain extra keys, which are ignored
- -> Maybe [payload] -- A cycle, starting with node
- -- so each depends on the next
+ -- contain extra keys, which are ignored
+ -> Maybe [payload] -- A cycle, starting with node
+ -- so each depends on the next
findCycle graph
= go Set.empty (new_work root_deps []) []
where
@@ -189,29 +182,29 @@ findCycle graph
-- Find the node with fewest dependencies among the SCC modules
-- This is just a heuristic to find some plausible root module
root :: Node key payload
- root = fst (minWith snd [ (node, count (`Map.member` env) deps)
+ root = fst (minWith snd [ (node, count (`Map.member` env) deps)
| node@(_,_,deps) <- graph ])
(root_payload,root_key,root_deps) = root
-- 'go' implements Dijkstra's algorithm, more or less
- go :: Set.Set key -- Visited
- -> [WorkItem key payload] -- Work list, items length n
- -> [WorkItem key payload] -- Work list, items length n+1
- -> Maybe [payload] -- Returned cycle
+ go :: Set.Set key -- Visited
+ -> [WorkItem key payload] -- Work list, items length n
+ -> [WorkItem key payload] -- Work list, items length n+1
+ -> Maybe [payload] -- Returned cycle
-- Invariant: in a call (go visited ps qs),
-- visited = union (map tail (ps ++ qs))
- go _ [] [] = Nothing -- No cycles
+ go _ [] [] = Nothing -- No cycles
go visited [] qs = go visited qs []
- go visited (((payload,key,deps), path) : ps) qs
+ go visited (((payload,key,deps), path) : ps) qs
| key == root_key = Just (root_payload : reverse path)
| key `Set.member` visited = go visited ps qs
| key `Map.notMember` env = go visited ps qs
| otherwise = go (Set.insert key visited)
ps (new_qs ++ qs)
where
- new_qs = new_work deps (payload : path)
+ new_qs = new_work deps (payload : path)
new_work :: [key] -> [payload] -> [WorkItem key payload]
new_work deps path = [ (n, path) | Just n <- map (`Map.lookup` env) deps ]
@@ -250,7 +243,7 @@ instance Outputable a => Outputable (SCC a) where
%************************************************************************
Note: the components are returned topologically sorted: later components
-depend on earlier ones, but not vice versa i.e. later components only have
+depend on earlier ones, but not vice versa i.e. later components only have
edges going from them to earlier ones.
\begin{code}
@@ -258,14 +251,6 @@ stronglyConnCompG :: Graph node -> [SCC node]
stronglyConnCompG graph = decodeSccs graph forest
where forest = {-# SCC "Digraph.scc" #-} scc (gr_int_graph graph)
--- Find the set of strongly connected components starting from the
--- given roots. This is a good way to discard unreachable nodes at
--- the same time as computing SCCs.
-stronglyConnCompFromG :: Graph node -> [node] -> [SCC node]
-stronglyConnCompFromG graph roots = decodeSccs graph forest
- where forest = {-# SCC "Digraph.scc" #-} sccFrom (gr_int_graph graph) vs
- vs = [ v | Just v <- map (gr_node_to_vertex graph) roots ]
-
decodeSccs :: Graph node -> Forest Vertex -> [SCC node]
decodeSccs Graph { gr_int_graph = graph, gr_vertex_to_node = vertex_fn } forest
= map decode forest
@@ -315,7 +300,13 @@ dfsTopSortG graph =
reachableG :: Graph node -> node -> [node]
reachableG graph from = map (gr_vertex_to_node graph) result
where from_vertex = expectJust "reachableG" (gr_node_to_vertex graph from)
- result = {-# SCC "Digraph.reachable" #-} reachable (gr_int_graph graph) from_vertex
+ result = {-# SCC "Digraph.reachable" #-} reachable (gr_int_graph graph) [from_vertex]
+
+reachablesG :: Graph node -> [node] -> [node]
+reachablesG graph froms = map (gr_vertex_to_node graph) result
+ where result = {-# SCC "Digraph.reachable" #-}
+ reachable (gr_int_graph graph) vs
+ vs = [ v | Just v <- map (gr_node_to_vertex graph) froms ]
hasVertexG :: Graph node -> node -> Bool
hasVertexG graph node = isJust $ gr_node_to_vertex graph node
@@ -548,9 +539,6 @@ postorderF ts = foldr (.) id $ map postorder ts
postOrd :: IntGraph -> [Vertex]
postOrd g = postorderF (dff g) []
-postOrdFrom :: IntGraph -> [Vertex] -> [Vertex]
-postOrdFrom g vs = postorderF (dfs g vs) []
-
topSort :: IntGraph -> [Vertex]
topSort = reverse . postOrd
\end{code}
@@ -574,9 +562,6 @@ undirected g = buildG (bounds g) (edges g ++ reverseE g)
\begin{code}
scc :: IntGraph -> Forest Vertex
scc g = dfs g (reverse (postOrd (transpose g)))
-
-sccFrom :: IntGraph -> [Vertex] -> Forest Vertex
-sccFrom g vs = reverse (dfs (transpose g) (reverse (postOrdFrom g vs)))
\end{code}
------------------------------------------------------------
@@ -602,11 +587,11 @@ forward g tree pre = mapT select g
------------------------------------------------------------
\begin{code}
-reachable :: IntGraph -> Vertex -> [Vertex]
-reachable g v = preorderF (dfs g [v])
+reachable :: IntGraph -> [Vertex] -> [Vertex]
+reachable g vs = preorderF (dfs g vs)
path :: IntGraph -> Vertex -> Vertex -> Bool
-path g v w = w `elem` (reachable g v)
+path g v w = w `elem` (reachable g [v])
\end{code}
------------------------------------------------------------
@@ -664,18 +649,18 @@ noOutEdges g = [ v | v <- vertices g, null (g!v)]
vertexGroupsS :: Set s -> IntGraph -> [Vertex] -> ST s [[Vertex]]
vertexGroupsS provided g to_provide
- = if null to_provide
- then do {
+ = if null to_provide
+ then do {
all_provided <- allM (provided `contains`) (vertices g)
; if all_provided
then return []
- else error "vertexGroup: cyclic graph"
+ else error "vertexGroup: cyclic graph"
}
- else do {
+ else do {
mapM_ (include provided) to_provide
; to_provide' <- filterM (vertexReady provided g) (vertices g)
; rest <- vertexGroupsS provided g to_provide'
- ; return $ to_provide : rest
+ ; return $ to_provide : rest
}
vertexReady :: Set s -> IntGraph -> Vertex -> ST s Bool
diff --git a/compiler/utils/Encoding.hs b/compiler/utils/Encoding.hs
index c4a669c1341c..115703fc69d6 100644
--- a/compiler/utils/Encoding.hs
+++ b/compiler/utils/Encoding.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE BangPatterns, CPP, MagicHash, UnboxedTuples #-}
{-# OPTIONS_GHC -O #-}
-- We always optimise this, otherwise performance of a non-optimised
-- compiler is severely affected
diff --git a/compiler/utils/ExtsCompat46.hs b/compiler/utils/ExtsCompat46.hs
index da0e67ab932d..a33fef57d8f6 100644
--- a/compiler/utils/ExtsCompat46.hs
+++ b/compiler/utils/ExtsCompat46.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE BangPatterns, CPP #-}
+{-# LANGUAGE BangPatterns, CPP, MagicHash #-}
-----------------------------------------------------------------------------
-- |
diff --git a/compiler/utils/FastBool.lhs b/compiler/utils/FastBool.lhs
index 32cb7aef3a58..9558da7079cb 100644
--- a/compiler/utils/FastBool.lhs
+++ b/compiler/utils/FastBool.lhs
@@ -4,6 +4,8 @@
\section{Fast booleans}
\begin{code}
+{-# LANGUAGE CPP, MagicHash #-}
+
module FastBool (
--fastBool could be called bBox; isFastTrue, bUnbox; but they're not
FastBool, fastBool, isFastTrue, fastOr, fastAnd
diff --git a/compiler/utils/FastFunctions.lhs b/compiler/utils/FastFunctions.lhs
index b1dacdcd9bc1..457fcc9c9338 100644
--- a/compiler/utils/FastFunctions.lhs
+++ b/compiler/utils/FastFunctions.lhs
@@ -4,6 +4,7 @@ Z%
\section{Fast functions}
\begin{code}
+{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-}
module FastFunctions (
unsafeChr, inlinePerformIO, unsafeDupableInterleaveIO,
diff --git a/compiler/utils/FastMutInt.lhs b/compiler/utils/FastMutInt.lhs
index 7156cdc9fb0e..0f0ca78e144c 100644
--- a/compiler/utils/FastMutInt.lhs
+++ b/compiler/utils/FastMutInt.lhs
@@ -1,6 +1,5 @@
\begin{code}
-{-# LANGUAGE BangPatterns #-}
-{-# OPTIONS -cpp #-}
+{-# LANGUAGE CPP, BangPatterns, MagicHash, UnboxedTuples #-}
{-# OPTIONS_GHC -O #-}
-- We always optimise this, otherwise performance of a non-optimised
-- compiler is severely affected
diff --git a/compiler/utils/FastString.lhs b/compiler/utils/FastString.lhs
index a4908b58c142..157e5f08b04a 100644
--- a/compiler/utils/FastString.lhs
+++ b/compiler/utils/FastString.lhs
@@ -2,7 +2,7 @@
% (c) The University of Glasgow, 1997-2006
%
\begin{code}
-{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE BangPatterns, CPP, DeriveDataTypeable, MagicHash, UnboxedTuples #-}
{-# OPTIONS_GHC -O -funbox-strict-fields #-}
-- We always optimise this, otherwise performance of a non-optimised
-- compiler is severely affected
@@ -10,18 +10,20 @@
-- |
-- There are two principal string types used internally by GHC:
--
--- 'FastString':
--- * A compact, hash-consed, representation of character strings.
--- * Comparison is O(1), and you can get a 'Unique.Unique' from them.
--- * Generated by 'fsLit'.
--- * Turn into 'Outputable.SDoc' with 'Outputable.ftext'.
+-- ['FastString']
--
--- 'LitString':
--- * Just a wrapper for the @Addr#@ of a C string (@Ptr CChar@).
--- * Practically no operations.
--- * Outputing them is fast.
--- * Generated by 'sLit'.
--- * Turn into 'Outputable.SDoc' with 'Outputable.ptext'
+-- * A compact, hash-consed, representation of character strings.
+-- * Comparison is O(1), and you can get a 'Unique.Unique' from them.
+-- * Generated by 'fsLit'.
+-- * Turn into 'Outputable.SDoc' with 'Outputable.ftext'.
+--
+-- ['LitString']
+--
+-- * Just a wrapper for the @Addr#@ of a C string (@Ptr CChar@).
+-- * Practically no operations.
+-- * Outputing them is fast.
+-- * Generated by 'sLit'.
+-- * Turn into 'Outputable.SDoc' with 'Outputable.ptext'
--
-- Use 'LitString' unless you want the facilities of 'FastString'.
module FastString
@@ -237,7 +239,7 @@ data FastStringTable =
string_table :: FastStringTable
{-# NOINLINE string_table #-}
string_table = unsafePerformIO $ do
- uid <- newIORef 0
+ uid <- newIORef 603979776 -- ord '$' * 0x01000000
tab <- IO $ \s1# -> case newArray# hASH_TBL_SIZE_UNBOXED (panic "string_table") s1# of
(# s2#, arr# #) ->
(# s2#, FastStringTable uid arr# #)
diff --git a/compiler/utils/FastTypes.lhs b/compiler/utils/FastTypes.lhs
index 0ef10ade561e..36d8e4c4fd7c 100644
--- a/compiler/utils/FastTypes.lhs
+++ b/compiler/utils/FastTypes.lhs
@@ -4,6 +4,7 @@
\section{Fast integers, etc... booleans moved to FastBool for using panic}
\begin{code}
+{-# LANGUAGE CPP, MagicHash #-}
--Even if the optimizer could handle boxed arithmetic equally well,
--this helps automatically check the sources to make sure that
diff --git a/compiler/utils/Fingerprint.hsc b/compiler/utils/Fingerprint.hsc
index 9a55e385b3da..464337b7a913 100644
--- a/compiler/utils/Fingerprint.hsc
+++ b/compiler/utils/Fingerprint.hsc
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-- ----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 2006
diff --git a/compiler/utils/GraphBase.hs b/compiler/utils/GraphBase.hs
index 8cb3acee718c..2aa16ae99e18 100644
--- a/compiler/utils/GraphBase.hs
+++ b/compiler/utils/GraphBase.hs
@@ -1,7 +1,7 @@
-- | Types for the general graph colorer.
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
diff --git a/compiler/utils/GraphPpr.hs b/compiler/utils/GraphPpr.hs
index a896bbbf63b4..2682c7347e6b 100644
--- a/compiler/utils/GraphPpr.hs
+++ b/compiler/utils/GraphPpr.hs
@@ -1,7 +1,7 @@
-- | Pretty printing of graphs.
-{-# OPTIONS -fno-warn-tabs #-}
+{-# OPTIONS_GHC -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
diff --git a/compiler/utils/IOEnv.hs b/compiler/utils/IOEnv.hs
index 6885bbd12795..1db15537c7dd 100644
--- a/compiler/utils/IOEnv.hs
+++ b/compiler/utils/IOEnv.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE DeriveDataTypeable, UndecidableInstances #-}
+
--
-- (c) The University of Glasgow 2002-2006
--
@@ -7,7 +9,6 @@
-- as its in the IO monad, mutable references can be used
-- for updating state.
--
-{-# LANGUAGE UndecidableInstances #-}
module IOEnv (
IOEnv, -- Instance of Monad
diff --git a/compiler/utils/ListSetOps.lhs b/compiler/utils/ListSetOps.lhs
index 5ad402d0815f..6247dc67f620 100644
--- a/compiler/utils/ListSetOps.lhs
+++ b/compiler/utils/ListSetOps.lhs
@@ -5,6 +5,7 @@
\section[ListSetOps]{Set-like operations on lists}
\begin{code}
+{-# LANGUAGE CPP #-}
module ListSetOps (
unionLists, minusList, insertList,
diff --git a/compiler/utils/Maybes.lhs b/compiler/utils/Maybes.lhs
index 3c943bd22452..d9e1762a2f07 100644
--- a/compiler/utils/Maybes.lhs
+++ b/compiler/utils/Maybes.lhs
@@ -11,12 +11,9 @@ module Maybes (
failME, isSuccess,
orElse,
- mapCatMaybes,
- allMaybes,
firstJust, firstJusts,
whenIsJust,
expectJust,
- maybeToBool,
MaybeT(..)
) where
@@ -34,53 +31,26 @@ infixr 4 `orElse`
%************************************************************************
\begin{code}
-maybeToBool :: Maybe a -> Bool
-maybeToBool Nothing = False
-maybeToBool (Just _) = True
-
--- | Collects a list of @Justs@ into a single @Just@, returning @Nothing@ if
--- there are any @Nothings@.
-allMaybes :: [Maybe a] -> Maybe [a]
-allMaybes [] = Just []
-allMaybes (Nothing : _) = Nothing
-allMaybes (Just x : ms) = case allMaybes ms of
- Nothing -> Nothing
- Just xs -> Just (x:xs)
-
firstJust :: Maybe a -> Maybe a -> Maybe a
-firstJust (Just a) _ = Just a
-firstJust Nothing b = b
+firstJust a b = firstJusts [a, b]
-- | Takes a list of @Maybes@ and returns the first @Just@ if there is one, or
-- @Nothing@ otherwise.
firstJusts :: [Maybe a] -> Maybe a
-firstJusts = foldr firstJust Nothing
-\end{code}
+firstJusts = msum
-\begin{code}
expectJust :: String -> Maybe a -> a
{-# INLINE expectJust #-}
expectJust _ (Just x) = x
expectJust err Nothing = error ("expectJust " ++ err)
-\end{code}
-
-\begin{code}
-mapCatMaybes :: (a -> Maybe b) -> [a] -> [b]
-mapCatMaybes _ [] = []
-mapCatMaybes f (x:xs) = case f x of
- Just y -> y : mapCatMaybes f xs
- Nothing -> mapCatMaybes f xs
whenIsJust :: Monad m => Maybe a -> (a -> m ()) -> m ()
whenIsJust (Just x) f = f x
whenIsJust Nothing _ = return ()
-\end{code}
-\begin{code}
--- | flipped version of @fromMaybe@.
+-- | Flipped version of @fromMaybe@, useful for chaining.
orElse :: Maybe a -> a -> a
-(Just x) `orElse` _ = x
-Nothing `orElse` y = y
+orElse = flip fromMaybe
\end{code}
%************************************************************************
diff --git a/compiler/utils/OrdList.lhs b/compiler/utils/OrdList.lhs
index d1d8708dd355..4cbb57b2ff7e 100644
--- a/compiler/utils/OrdList.lhs
+++ b/compiler/utils/OrdList.lhs
@@ -15,6 +15,10 @@ module OrdList (
mapOL, fromOL, toOL, foldrOL, foldlOL
) where
+import Outputable
+
+import Data.Monoid
+
infixl 5 `appOL`
infixl 5 `snocOL`
infixr 5 `consOL`
@@ -28,6 +32,13 @@ data OrdList a
| Two (OrdList a) -- Invariant: non-empty
(OrdList a) -- Invariant: non-empty
+instance Outputable a => Outputable (OrdList a) where
+ ppr ol = ppr (fromOL ol) -- Convert to list and print that
+
+instance Monoid (OrdList a) where
+ mempty = nilOL
+ mappend = appOL
+ mconcat = concatOL
nilOL :: OrdList a
isNilOL :: OrdList a -> Bool
diff --git a/compiler/utils/Outputable.lhs b/compiler/utils/Outputable.lhs
index 8a12670d50ab..c1adf934e16b 100644
--- a/compiler/utils/Outputable.lhs
+++ b/compiler/utils/Outputable.lhs
@@ -22,11 +22,12 @@ module Outputable (
char,
text, ftext, ptext, ztext,
int, intWithCommas, integer, float, double, rational,
- parens, cparen, brackets, braces, quotes, quote,
+ parens, cparen, brackets, braces, quotes, quote,
doubleQuotes, angleBrackets, paBrackets,
- semi, comma, colon, dcolon, space, equals, dot, arrow, darrow,
+ semi, comma, colon, dcolon, space, equals, dot,
+ arrow, larrow, darrow, arrowt, larrowt, arrowtt, larrowtt,
lparen, rparen, lbrack, rbrack, lbrace, rbrace, underscore,
- blankLine,
+ blankLine, forAllLit,
(<>), (<+>), hcat, hsep,
($$), ($+$), vcat,
sep, cat,
@@ -52,17 +53,20 @@ module Outputable (
-- * Controlling the style in which output is printed
BindingSite(..),
- PprStyle, CodeStyle(..), PrintUnqualified,
+ PprStyle, CodeStyle(..), PrintUnqualified(..),
+ QueryQualifyName, QueryQualifyModule, QueryQualifyPackage,
+ reallyAlwaysQualify, reallyAlwaysQualifyNames,
alwaysQualify, alwaysQualifyNames, alwaysQualifyModules,
neverQualify, neverQualifyNames, neverQualifyModules,
- QualifyName(..),
+ QualifyName(..), queryQual,
sdocWithDynFlags, sdocWithPlatform,
getPprStyle, withPprStyle, withPprStyleDoc,
pprDeeper, pprDeeperList, pprSetDepth,
codeStyle, userStyle, debugStyle, dumpStyle, asmStyle,
- ifPprDebug, qualName, qualModule,
- mkErrStyle, defaultErrStyle, defaultDumpStyle, defaultUserStyle,
- mkUserStyle, cmdlineParserStyle, Depth(..),
+ ifPprDebug, qualName, qualModule, qualPackage,
+ mkErrStyle, defaultErrStyle, defaultDumpStyle, lineAnnotatedDumpStyle,
+ defaultUserStyle, mkUserStyle, cmdlineParserStyle, Depth(..),
+ pprAnnotate,
-- * Error handling and debugging utilities
pprPanic, pprSorry, assertPprPanic, pprPanicFastInt, pprPgmError,
@@ -73,9 +77,9 @@ module Outputable (
import {-# SOURCE #-} DynFlags( DynFlags,
targetPlatform, pprUserLength, pprCols,
- useUnicodeQuotes,
+ useUnicode, useUnicodeSyntax,
unsafeGlobalDynFlags )
-import {-# SOURCE #-} Module( Module, ModuleName, moduleName )
+import {-# SOURCE #-} Module( PackageKey, Module, ModuleName, moduleName )
import {-# SOURCE #-} OccName( OccName )
import {-# SOURCE #-} StaticFlags( opt_PprStyle_Debug, opt_NoDebugOutput )
@@ -125,10 +129,13 @@ data PprStyle
| PprCode CodeStyle
-- Print code; either C or assembler
- | PprDump -- For -ddump-foo; less verbose than PprDebug.
+ | PprDump Bool -- For -ddump-foo; less verbose than PprDebug.
-- Does not assume tidied code: non-external names
-- are printed with uniques.
+ -- Parameter governs whether to generate markers
+ -- for finding line number information later
+
| PprDebug -- Full debugging output
data CodeStyle = CStyle -- The format of labels differs for C and assembler
@@ -141,12 +148,15 @@ data Depth = AllTheWay
-- -----------------------------------------------------------------------------
-- Printing original names
--- When printing code that contains original names, we need to map the
+-- | When printing code that contains original names, we need to map the
-- original names back to something the user understands. This is the
--- purpose of the pair of functions that gets passed around
+-- purpose of the triple of functions that gets passed around
-- when rendering 'SDoc'.
-
-type PrintUnqualified = (QueryQualifyName, QueryQualifyModule)
+data PrintUnqualified = QueryQualify {
+ queryQualifyName :: QueryQualifyName,
+ queryQualifyModule :: QueryQualifyModule,
+ queryQualifyPackage :: QueryQualifyPackage
+}
-- | given an /original/ name, this function tells you which module
-- name it should be qualified with when printing for the user, if
@@ -160,6 +170,9 @@ type QueryQualifyName = Module -> OccName -> QualifyName
-- a package name to disambiguate it.
type QueryQualifyModule = Module -> Bool
+-- | For a given package, we need to know whether to print it with
+-- the package key to disambiguate it.
+type QueryQualifyPackage = PackageKey -> Bool
-- See Note [Printing original names] in HscTypes
data QualifyName -- given P:M.T
@@ -172,6 +185,10 @@ data QualifyName -- given P:M.T
-- it is not in scope at all, and M.T is already bound in the
-- current scope, so we must refer to it as "P:M.T"
+reallyAlwaysQualifyNames :: QueryQualifyName
+reallyAlwaysQualifyNames _ _ = NameNotInScope2
+
+-- | NB: This won't ever show package IDs
alwaysQualifyNames :: QueryQualifyName
alwaysQualifyNames m _ = NameQual (moduleName m)
@@ -184,37 +201,53 @@ alwaysQualifyModules _ = True
neverQualifyModules :: QueryQualifyModule
neverQualifyModules _ = False
-alwaysQualify, neverQualify :: PrintUnqualified
-alwaysQualify = (alwaysQualifyNames, alwaysQualifyModules)
-neverQualify = (neverQualifyNames, neverQualifyModules)
+alwaysQualifyPackages :: QueryQualifyPackage
+alwaysQualifyPackages _ = True
+
+neverQualifyPackages :: QueryQualifyPackage
+neverQualifyPackages _ = False
+
+reallyAlwaysQualify, alwaysQualify, neverQualify :: PrintUnqualified
+reallyAlwaysQualify
+ = QueryQualify reallyAlwaysQualifyNames
+ alwaysQualifyModules
+ alwaysQualifyPackages
+alwaysQualify = QueryQualify alwaysQualifyNames
+ alwaysQualifyModules
+ alwaysQualifyPackages
+neverQualify = QueryQualify neverQualifyNames
+ neverQualifyModules
+ neverQualifyPackages
defaultUserStyle, defaultDumpStyle :: PprStyle
-defaultUserStyle = mkUserStyle alwaysQualify AllTheWay
+defaultUserStyle = mkUserStyle neverQualify AllTheWay
+ -- Print without qualifiers to reduce verbosity, unless -dppr-debug
defaultDumpStyle | opt_PprStyle_Debug = PprDebug
- | otherwise = PprDump
+ | otherwise = PprDump False
--- | Style for printing error messages
-mkErrStyle :: DynFlags -> PrintUnqualified -> PprStyle
-mkErrStyle dflags qual = mkUserStyle qual (PartWay (pprUserLength dflags))
+lineAnnotatedDumpStyle :: PprStyle
+lineAnnotatedDumpStyle = PprDump True
defaultErrStyle :: DynFlags -> PprStyle
--- Default style for error messages
+-- Default style for error messages, when we don't know PrintUnqualified
-- It's a bit of a hack because it doesn't take into account what's in scope
-- Only used for desugarer warnings, and typechecker errors in interface sigs
-defaultErrStyle dflags = mkUserStyle alwaysQualify depth
- where depth = if opt_PprStyle_Debug
- then AllTheWay
- else PartWay (pprUserLength dflags)
+-- NB that -dppr-debug will still get into PprDebug style
+defaultErrStyle dflags = mkErrStyle dflags neverQualify
+
+-- | Style for printing error messages
+mkErrStyle :: DynFlags -> PrintUnqualified -> PprStyle
+mkErrStyle dflags qual = mkUserStyle qual (PartWay (pprUserLength dflags))
+
+cmdlineParserStyle :: PprStyle
+cmdlineParserStyle = mkUserStyle alwaysQualify AllTheWay
mkUserStyle :: PrintUnqualified -> Depth -> PprStyle
mkUserStyle unqual depth
| opt_PprStyle_Debug = PprDebug
| otherwise = PprUser unqual depth
-
-cmdlineParserStyle :: PprStyle
-cmdlineParserStyle = PprUser alwaysQualify AllTheWay
\end{code}
Orthogonal to the above printing styles are (possibly) some
@@ -297,13 +330,22 @@ sdocWithPlatform f = sdocWithDynFlags (f . targetPlatform)
\begin{code}
qualName :: PprStyle -> QueryQualifyName
-qualName (PprUser (qual_name,_) _) mod occ = qual_name mod occ
+qualName (PprUser q _) mod occ = queryQualifyName q mod occ
qualName _other mod _ = NameQual (moduleName mod)
qualModule :: PprStyle -> QueryQualifyModule
-qualModule (PprUser (_,qual_mod) _) m = qual_mod m
+qualModule (PprUser q _) m = queryQualifyModule q m
qualModule _other _m = True
+qualPackage :: PprStyle -> QueryQualifyPackage
+qualPackage (PprUser q _) m = queryQualifyPackage q m
+qualPackage _other _m = True
+
+queryQual :: PprStyle -> PrintUnqualified
+queryQual s = QueryQualify (qualName s)
+ (qualModule s)
+ (qualPackage s)
+
codeStyle :: PprStyle -> Bool
codeStyle (PprCode _) = True
codeStyle _ = False
@@ -313,8 +355,8 @@ asmStyle (PprCode AsmStyle) = True
asmStyle _other = False
dumpStyle :: PprStyle -> Bool
-dumpStyle PprDump = True
-dumpStyle _other = False
+dumpStyle (PprDump _) = True
+dumpStyle _other = False
debugStyle :: PprStyle -> Bool
debugStyle PprDebug = True
@@ -398,7 +440,7 @@ showSDocDebug dflags d = renderWithStyle dflags d PprDebug
showSDocDumpOneLine :: DynFlags -> SDoc -> String
showSDocDumpOneLine dflags d
= Pretty.showDoc OneLineMode irrelevantNCols $
- runSDoc d (initSDocContext dflags PprDump)
+ runSDoc d (initSDocContext dflags (PprDump False))
showPpr :: Outputable a => DynFlags -> a -> String
showPpr dflags thing = showSDoc dflags (ppr thing)
@@ -459,8 +501,8 @@ cparen b d = SDoc $ Pretty.cparen b . runSDoc d
-- so that we don't get `foo''. Instead we just have foo'.
quotes d =
sdocWithDynFlags $ \dflags ->
- if useUnicodeQuotes dflags
- then char '‛' <> d <> char '’'
+ if useUnicode dflags
+ then char '‘' <> d <> char '’'
else SDoc $ \sty ->
let pp_d = runSDoc d sty
str = show pp_d
@@ -469,13 +511,19 @@ quotes d =
('\'' : _, _) -> pp_d
_other -> Pretty.quotes pp_d
-semi, comma, colon, equals, space, dcolon, arrow, underscore, dot :: SDoc
-darrow, lparen, rparen, lbrack, rbrack, lbrace, rbrace, blankLine :: SDoc
+semi, comma, colon, equals, space, dcolon, underscore, dot :: SDoc
+arrow, larrow, darrow, arrowt, larrowt, arrowtt, larrowtt :: SDoc
+lparen, rparen, lbrack, rbrack, lbrace, rbrace, blankLine :: SDoc
blankLine = docToSDoc $ Pretty.ptext (sLit "")
-dcolon = docToSDoc $ Pretty.ptext (sLit "::")
-arrow = docToSDoc $ Pretty.ptext (sLit "->")
-darrow = docToSDoc $ Pretty.ptext (sLit "=>")
+dcolon = unicodeSyntax (char '∷') (docToSDoc $ Pretty.ptext (sLit "::"))
+arrow = unicodeSyntax (char '→') (docToSDoc $ Pretty.ptext (sLit "->"))
+larrow = unicodeSyntax (char 'â†') (docToSDoc $ Pretty.ptext (sLit "<-"))
+darrow = unicodeSyntax (char '⇒') (docToSDoc $ Pretty.ptext (sLit "=>"))
+arrowt = unicodeSyntax (char '↣') (docToSDoc $ Pretty.ptext (sLit ">-"))
+larrowt = unicodeSyntax (char '↢') (docToSDoc $ Pretty.ptext (sLit "-<"))
+arrowtt = unicodeSyntax (char '⤜') (docToSDoc $ Pretty.ptext (sLit ">>-"))
+larrowtt = unicodeSyntax (char '⤛') (docToSDoc $ Pretty.ptext (sLit "-<<"))
semi = docToSDoc $ Pretty.semi
comma = docToSDoc $ Pretty.comma
colon = docToSDoc $ Pretty.colon
@@ -490,6 +538,15 @@ rbrack = docToSDoc $ Pretty.rbrack
lbrace = docToSDoc $ Pretty.lbrace
rbrace = docToSDoc $ Pretty.rbrace
+forAllLit :: SDoc
+forAllLit = unicodeSyntax (char '∀') (ptext (sLit "forall"))
+
+unicodeSyntax :: SDoc -> SDoc -> SDoc
+unicodeSyntax unicode plain = sdocWithDynFlags $ \dflags ->
+ if useUnicode dflags && useUnicodeSyntax dflags
+ then unicode
+ else plain
+
nest :: Int -> SDoc -> SDoc
-- ^ Indent 'SDoc' some specified amount
(<>) :: SDoc -> SDoc -> SDoc
@@ -924,6 +981,16 @@ isOrAre _ = ptext (sLit "are")
\end{code}
+\begin{code}
+
+pprAnnotate :: String -> SDoc -> SDoc
+pprAnnotate name sdoc = SDoc $ \ctx ->
+ case sdocStyle ctx of
+ PprDump True -> Pretty.zeroWidthText name Pretty.<> runSDoc sdoc ctx
+ _other -> runSDoc sdoc ctx
+
+\end{code}
+
%************************************************************************
%* *
\subsection{Error handling}
@@ -979,7 +1046,7 @@ assertPprPanic file line msg
pprDebugAndThen :: DynFlags -> (String -> a) -> String -> SDoc -> a
pprDebugAndThen dflags cont heading pretty_msg
- = cont (showSDocDebug dflags doc)
+ = cont (showSDoc dflags doc)
where
doc = sep [text heading, nest 4 pretty_msg]
\end{code}
diff --git a/compiler/utils/Pair.lhs b/compiler/utils/Pair.lhs
index 9e847d695055..ca7c2a7f8e3f 100644
--- a/compiler/utils/Pair.lhs
+++ b/compiler/utils/Pair.lhs
@@ -3,6 +3,8 @@ A simple homogeneous pair type with useful Functor, Applicative, and
Traversable instances.
\begin{code}
+{-# LANGUAGE CPP #-}
+
module Pair ( Pair(..), unPair, toPair, swap ) where
#include "HsVersions.h"
diff --git a/compiler/utils/Panic.lhs b/compiler/utils/Panic.lhs
index fc04668ae16a..583174b201b8 100644
--- a/compiler/utils/Panic.lhs
+++ b/compiler/utils/Panic.lhs
@@ -8,6 +8,8 @@ It's hard to put these functions anywhere else without causing
some unnecessary loops in the module dependency graph.
\begin{code}
+{-# LANGUAGE CPP, DeriveDataTypeable, ScopedTypeVariables #-}
+
module Panic (
GhcException(..), showGhcException,
throwGhcException, throwGhcExceptionIO,
diff --git a/compiler/utils/Platform.hs b/compiler/utils/Platform.hs
index f69bb4cdf65e..ca8f0de862ef 100644
--- a/compiler/utils/Platform.hs
+++ b/compiler/utils/Platform.hs
@@ -12,6 +12,7 @@ module Platform (
target32Bit,
isARM,
osElfTarget,
+ osMachOTarget,
platformUsesFrameworks,
platformBinariesAreStaticLibs,
)
@@ -51,6 +52,7 @@ data Arch
, armISAExt :: [ArmISAExt]
, armABI :: ArmABI
}
+ | ArchARM64
| ArchAlpha
| ArchMipseb
| ArchMipsel
@@ -129,6 +131,11 @@ osElfTarget OSUnknown = False
-- portability, otherwise we have to answer this question for every
-- new platform we compile on (even unreg).
+-- | This predicate tells us whether the OS support Mach-O shared libraries.
+osMachOTarget :: OS -> Bool
+osMachOTarget OSDarwin = True
+osMachOTarget _ = False
+
osUsesFrameworks :: OS -> Bool
osUsesFrameworks OSDarwin = True
osUsesFrameworks OSiOS = True
diff --git a/compiler/utils/Pretty.lhs b/compiler/utils/Pretty.lhs
index fb7fe2b7fb66..f6a5a44e2ee7 100644
--- a/compiler/utils/Pretty.lhs
+++ b/compiler/utils/Pretty.lhs
@@ -152,7 +152,7 @@ Relative to John's original paper, there are the following new features:
\begin{code}
-{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE BangPatterns, CPP, MagicHash #-}
module Pretty (
Doc, -- Abstract
diff --git a/compiler/utils/Serialized.hs b/compiler/utils/Serialized.hs
index 902d2feea06c..b1576a087f60 100644
--- a/compiler/utils/Serialized.hs
+++ b/compiler/utils/Serialized.hs
@@ -1,9 +1,10 @@
+{-# LANGUAGE CPP, RankNTypes, ScopedTypeVariables #-}
+
--
-- (c) The University of Glasgow 2002-2006
--
-- Serialized values
-{-# LANGUAGE ScopedTypeVariables #-}
module Serialized (
-- * Main Serialized data type
Serialized,
diff --git a/compiler/utils/State.hs b/compiler/utils/State.hs
index 0b6a2855627d..216034fdbf36 100644
--- a/compiler/utils/State.hs
+++ b/compiler/utils/State.hs
@@ -1,3 +1,4 @@
+{-# LANGUAGE UnboxedTuples #-}
module State (module State, mapAccumLM {- XXX hack -}) where
diff --git a/compiler/utils/StringBuffer.lhs b/compiler/utils/StringBuffer.lhs
index 46cce5864d1b..a54f45ffff9e 100644
--- a/compiler/utils/StringBuffer.lhs
+++ b/compiler/utils/StringBuffer.lhs
@@ -6,7 +6,7 @@
Buffers for scanning string input stored in external arrays.
\begin{code}
-{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE BangPatterns, CPP, MagicHash, UnboxedTuples #-}
{-# OPTIONS_GHC -O -funbox-strict-fields #-}
-- We always optimise this, otherwise performance of a non-optimised
-- compiler is severely affected
diff --git a/compiler/utils/UnVarGraph.hs b/compiler/utils/UnVarGraph.hs
new file mode 100644
index 000000000000..228f3b5220bb
--- /dev/null
+++ b/compiler/utils/UnVarGraph.hs
@@ -0,0 +1,136 @@
+{-
+
+Copyright (c) 2014 Joachim Breitner
+
+A data structure for undirected graphs of variables
+(or in plain terms: Sets of unordered pairs of numbers)
+
+
+This is very specifically tailored for the use in CallArity. In particular it
+stores the graph as a union of complete and complete bipartite graph, which
+would be very expensive to store as sets of edges or as adjanceny lists.
+
+It does not normalize the graphs. This means that g `unionUnVarGraph` g is
+equal to g, but twice as expensive and large.
+
+-}
+module UnVarGraph
+ ( UnVarSet
+ , emptyUnVarSet, mkUnVarSet, varEnvDom, unionUnVarSet, unionUnVarSets
+ , delUnVarSet
+ , elemUnVarSet, isEmptyUnVarSet
+ , UnVarGraph
+ , emptyUnVarGraph
+ , unionUnVarGraph, unionUnVarGraphs
+ , completeGraph, completeBipartiteGraph
+ , neighbors
+ , delNode
+ ) where
+
+import Id
+import VarEnv
+import UniqFM
+import Outputable
+import Data.List
+import Bag
+import Unique
+
+import qualified Data.IntSet as S
+
+-- We need a type for sets of variables (UnVarSet).
+-- We do not use VarSet, because for that we need to have the actual variable
+-- at hand, and we do not have that when we turn the domain of a VarEnv into a UnVarSet.
+-- Therefore, use a IntSet directly (which is likely also a bit more efficient).
+
+-- Set of uniques, i.e. for adjancet nodes
+newtype UnVarSet = UnVarSet (S.IntSet)
+ deriving Eq
+
+k :: Var -> Int
+k v = getKey (getUnique v)
+
+emptyUnVarSet :: UnVarSet
+emptyUnVarSet = UnVarSet S.empty
+
+elemUnVarSet :: Var -> UnVarSet -> Bool
+elemUnVarSet v (UnVarSet s) = k v `S.member` s
+
+
+isEmptyUnVarSet :: UnVarSet -> Bool
+isEmptyUnVarSet (UnVarSet s) = S.null s
+
+delUnVarSet :: UnVarSet -> Var -> UnVarSet
+delUnVarSet (UnVarSet s) v = UnVarSet $ k v `S.delete` s
+
+mkUnVarSet :: [Var] -> UnVarSet
+mkUnVarSet vs = UnVarSet $ S.fromList $ map k vs
+
+varEnvDom :: VarEnv a -> UnVarSet
+varEnvDom ae = UnVarSet $ ufmToSet_Directly ae
+
+unionUnVarSet :: UnVarSet -> UnVarSet -> UnVarSet
+unionUnVarSet (UnVarSet set1) (UnVarSet set2) = UnVarSet (set1 `S.union` set2)
+
+unionUnVarSets :: [UnVarSet] -> UnVarSet
+unionUnVarSets = foldr unionUnVarSet emptyUnVarSet
+
+instance Outputable UnVarSet where
+ ppr (UnVarSet s) = braces $
+ hcat $ punctuate comma [ ppr (getUnique i) | i <- S.toList s]
+
+
+-- The graph type. A list of complete bipartite graphs
+data Gen = CBPG UnVarSet UnVarSet -- complete bipartite
+ | CG UnVarSet -- complete
+newtype UnVarGraph = UnVarGraph (Bag Gen)
+
+emptyUnVarGraph :: UnVarGraph
+emptyUnVarGraph = UnVarGraph emptyBag
+
+unionUnVarGraph :: UnVarGraph -> UnVarGraph -> UnVarGraph
+{-
+Premature optimisation, it seems.
+unionUnVarGraph (UnVarGraph [CBPG s1 s2]) (UnVarGraph [CG s3, CG s4])
+ | s1 == s3 && s2 == s4
+ = pprTrace "unionUnVarGraph fired" empty $
+ completeGraph (s1 `unionUnVarSet` s2)
+unionUnVarGraph (UnVarGraph [CBPG s1 s2]) (UnVarGraph [CG s3, CG s4])
+ | s2 == s3 && s1 == s4
+ = pprTrace "unionUnVarGraph fired2" empty $
+ completeGraph (s1 `unionUnVarSet` s2)
+-}
+unionUnVarGraph (UnVarGraph g1) (UnVarGraph g2)
+ = -- pprTrace "unionUnVarGraph" (ppr (length g1, length g2)) $
+ UnVarGraph (g1 `unionBags` g2)
+
+unionUnVarGraphs :: [UnVarGraph] -> UnVarGraph
+unionUnVarGraphs = foldl' unionUnVarGraph emptyUnVarGraph
+
+-- completeBipartiteGraph A B = { {a,b} | a ∈ A, b ∈ B }
+completeBipartiteGraph :: UnVarSet -> UnVarSet -> UnVarGraph
+completeBipartiteGraph s1 s2 = prune $ UnVarGraph $ unitBag $ CBPG s1 s2
+
+completeGraph :: UnVarSet -> UnVarGraph
+completeGraph s = prune $ UnVarGraph $ unitBag $ CG s
+
+neighbors :: UnVarGraph -> Var -> UnVarSet
+neighbors (UnVarGraph g) v = unionUnVarSets $ concatMap go $ bagToList g
+ where go (CG s) = (if v `elemUnVarSet` s then [s] else [])
+ go (CBPG s1 s2) = (if v `elemUnVarSet` s1 then [s2] else []) ++
+ (if v `elemUnVarSet` s2 then [s1] else [])
+
+delNode :: UnVarGraph -> Var -> UnVarGraph
+delNode (UnVarGraph g) v = prune $ UnVarGraph $ mapBag go g
+ where go (CG s) = CG (s `delUnVarSet` v)
+ go (CBPG s1 s2) = CBPG (s1 `delUnVarSet` v) (s2 `delUnVarSet` v)
+
+prune :: UnVarGraph -> UnVarGraph
+prune (UnVarGraph g) = UnVarGraph $ filterBag go g
+ where go (CG s) = not (isEmptyUnVarSet s)
+ go (CBPG s1 s2) = not (isEmptyUnVarSet s1) && not (isEmptyUnVarSet s2)
+
+instance Outputable Gen where
+ ppr (CG s) = ppr s <> char '²'
+ ppr (CBPG s1 s2) = ppr s1 <+> char 'x' <+> ppr s2
+instance Outputable UnVarGraph where
+ ppr (UnVarGraph g) = ppr g
diff --git a/compiler/utils/UniqFM.lhs b/compiler/utils/UniqFM.lhs
index 52cd3dd7915e..d8e08f599ab1 100644
--- a/compiler/utils/UniqFM.lhs
+++ b/compiler/utils/UniqFM.lhs
@@ -20,9 +20,9 @@ and ``addToUFM\_C'' and ``Data.IntMap.insertWith'' differ in the order
of arguments of combining function.
\begin{code}
-{-# LANGUAGE DeriveTraversable, GeneralizedNewtypeDeriving #-}
+{-# LANGUAGE DeriveTraversable, DeriveDataTypeable, GeneralizedNewtypeDeriving #-}
+{-# OPTIONS_GHC -Wall #-}
-{-# OPTIONS -Wall #-}
module UniqFM (
-- * Unique-keyed mappings
UniqFM, -- abstract type
@@ -58,10 +58,12 @@ module UniqFM (
lookupUFM, lookupUFM_Directly,
lookupWithDefaultUFM, lookupWithDefaultUFM_Directly,
eltsUFM, keysUFM, splitUFM,
+ ufmToSet_Directly,
ufmToList,
- joinUFM
+ joinUFM, pprUniqFM
) where
+import FastString
import Unique ( Uniquable(..), Unique, getKey )
import Outputable
@@ -69,6 +71,7 @@ import Compiler.Hoopl hiding (Unique)
import Data.Function (on)
import qualified Data.IntMap as M
+import qualified Data.IntSet as S
import qualified Data.Foldable as Foldable
import qualified Data.Traversable as Traversable
import Data.Typeable
@@ -180,6 +183,7 @@ lookupWithDefaultUFM_Directly
:: UniqFM elt -> elt -> Unique -> elt
keysUFM :: UniqFM elt -> [Unique] -- Get the keys
eltsUFM :: UniqFM elt -> [elt]
+ufmToSet_Directly :: UniqFM elt -> S.IntSet
ufmToList :: UniqFM elt -> [(Unique, elt)]
\end{code}
@@ -293,6 +297,7 @@ lookupWithDefaultUFM (UFM m) v k = M.findWithDefault v (getKey $ getUnique k) m
lookupWithDefaultUFM_Directly (UFM m) v u = M.findWithDefault v (getKey u) m
keysUFM (UFM m) = map getUnique $ M.keys m
eltsUFM (UFM m) = M.elems m
+ufmToSet_Directly (UFM m) = M.keysSet m
ufmToList (UFM m) = map (\(k, v) -> (getUnique k, v)) $ M.toList m
-- Hoopl
@@ -315,5 +320,11 @@ joinUFM eltJoin l (OldFact old) (NewFact new) = foldUFM_Directly add (NoChange,
\begin{code}
instance Outputable a => Outputable (UniqFM a) where
- ppr ufm = ppr (ufmToList ufm)
+ ppr ufm = pprUniqFM ppr ufm
+
+pprUniqFM :: (a -> SDoc) -> UniqFM a -> SDoc
+pprUniqFM ppr_elt ufm
+ = brackets $ fsep $ punctuate comma $
+ [ ppr uq <+> ptext (sLit ":->") <+> ppr_elt elt
+ | (uq, elt) <- ufmToList ufm ]
\end{code}
diff --git a/compiler/utils/Util.lhs b/compiler/utils/Util.lhs
index 5c82c757aa5b..2dcc73fd890d 100644
--- a/compiler/utils/Util.lhs
+++ b/compiler/utils/Util.lhs
@@ -3,6 +3,7 @@
%
\begin{code}
+{-# LANGUAGE CPP #-}
-- | Highly random utility functions
--
@@ -46,7 +47,7 @@ module Util (
nTimes,
-- * Sorting
- sortWith, minWith,
+ sortWith, minWith, nubSort,
-- * Comparisons
isEqual, eqListBy, eqMaybeBy,
@@ -125,6 +126,7 @@ import Data.Ord ( comparing )
import Data.Bits
import Data.Word
import qualified Data.IntMap as IM
+import qualified Data.Set as Set
import Data.Time
#if __GLASGOW_HASKELL__ < 705
@@ -489,6 +491,9 @@ sortWith get_key xs = sortBy (comparing get_key) xs
minWith :: Ord b => (a -> b) -> [a] -> a
minWith get_key xs = ASSERT( not (null xs) )
head (sortWith get_key xs)
+
+nubSort :: Ord a => [a] -> [a]
+nubSort = Set.toAscList . Set.fromList
\end{code}
%************************************************************************
diff --git a/compiler/vectorise/Vectorise.hs b/compiler/vectorise/Vectorise.hs
index 012ae37039cf..38bd55482a39 100644
--- a/compiler/vectorise/Vectorise.hs
+++ b/compiler/vectorise/Vectorise.hs
@@ -351,6 +351,6 @@ tryConvert var vect_var rhs
= fromVect (idType var) (Var vect_var)
`orElseErrV`
do
- { emitVt " Could NOT call vectorised from original version" $ ppr var
+ { emitVt " Could NOT call vectorised from original version" $ ppr var <+> dcolon <+> ppr (idType var)
; return rhs
}
diff --git a/compiler/vectorise/Vectorise/Exp.hs b/compiler/vectorise/Vectorise/Exp.hs
index fb0c14861067..6adb9ec435d5 100644
--- a/compiler/vectorise/Vectorise/Exp.hs
+++ b/compiler/vectorise/Vectorise/Exp.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE TupleSections #-}
+{-# LANGUAGE CPP, TupleSections #-}
-- |Vectorisation of expressions.
diff --git a/compiler/vectorise/Vectorise/Generic/PAMethods.hs b/compiler/vectorise/Vectorise/Generic/PAMethods.hs
index 269119c6dd01..0d5d37c7d7af 100644
--- a/compiler/vectorise/Vectorise/Generic/PAMethods.hs
+++ b/compiler/vectorise/Vectorise/Generic/PAMethods.hs
@@ -16,7 +16,7 @@ import Vectorise.Generic.Description
import CoreSyn
import CoreUtils
import FamInstEnv
-import MkCore ( mkWildCase )
+import MkCore ( mkWildCase, mkCoreLet )
import TyCon
import CoAxiom
import Type
@@ -24,6 +24,7 @@ import OccName
import Coercion
import MkId
import FamInst
+import TysPrim( intPrimTy )
import DynFlags
import FastString
@@ -404,9 +405,13 @@ buildToArrPReprs vect_tc repr_co _ pdatas_tc r
-- and PDatas Void arrays in the product. See Note [Empty PDatas].
let xSums = App (repr_selsLength_v ss) (Var sels)
- (vars, exprs) <- mapAndUnzipM (to_con xSums) (repr_cons ss)
+ xSums_var <- newLocalVar (fsLit "xsum") intPrimTy
+
+ (vars, exprs) <- mapAndUnzipM (to_con xSums_var) (repr_cons ss)
return ( sels : concat vars
, wrapFamInstBody psums_tc (repr_con_tys ss)
+ $ mkCoreLet (NonRec xSums_var xSums)
+ -- mkCoreLet ensures that the let/app invariant holds
$ mkConApp psums_con
$ map Type (repr_con_tys ss) ++ (Var sels : exprs))
@@ -414,7 +419,7 @@ buildToArrPReprs vect_tc repr_co _ pdatas_tc r
= case ss of
EmptyProd
-> do pvoids <- builtin pvoidsVar
- return ([], App (Var pvoids) xSums )
+ return ([], App (Var pvoids) (Var xSums) )
UnaryProd r
-> do pty <- mkPDatasType (compOrigType r)
diff --git a/compiler/vectorise/Vectorise/Generic/PData.hs b/compiler/vectorise/Vectorise/Generic/PData.hs
index 37358c9bdf74..387d49c3ad91 100644
--- a/compiler/vectorise/Vectorise/Generic/PData.hs
+++ b/compiler/vectorise/Vectorise/Generic/PData.hs
@@ -45,7 +45,7 @@ buildDataFamInst :: Name -> TyCon -> TyCon -> AlgTyConRhs -> VM FamInst
buildDataFamInst name' fam_tc vect_tc rhs
= do { axiom_name <- mkDerivedName mkInstTyCoOcc name'
- ; (_, tyvars') <- liftDs $ tcInstSkolTyVarsLoc (getSrcSpan name') tyvars
+ ; (_, tyvars') <- liftDs $ tcInstSigTyVarsLoc (getSrcSpan name') tyvars
; let ax = mkSingleCoAxiom axiom_name tyvars' fam_tc pat_tys rep_ty
tys' = mkTyVarTys tyvars'
rep_ty = mkTyConApp rep_tc tys'
diff --git a/compiler/vectorise/Vectorise/Monad/InstEnv.hs b/compiler/vectorise/Vectorise/Monad/InstEnv.hs
index 84b29ceb6119..a97f319b4f0a 100644
--- a/compiler/vectorise/Vectorise/Monad/InstEnv.hs
+++ b/compiler/vectorise/Vectorise/Monad/InstEnv.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
module Vectorise.Monad.InstEnv
( existsInst
, lookupInst
diff --git a/compiler/vectorise/Vectorise/Monad/Naming.hs b/compiler/vectorise/Vectorise/Monad/Naming.hs
index def1ffa58c67..b53324012ff6 100644
--- a/compiler/vectorise/Vectorise/Monad/Naming.hs
+++ b/compiler/vectorise/Vectorise/Monad/Naming.hs
@@ -24,6 +24,7 @@ import Name
import SrcLoc
import MkId
import Id
+import IdInfo( IdDetails(VanillaId) )
import FastString
import Control.Monad
@@ -67,7 +68,7 @@ mkVectId :: Id -> Type -> VM Id
mkVectId id ty
= do { name <- mkLocalisedName mkVectOcc (getName id)
; let id' | isDFunId id = MkId.mkDictFunId name tvs theta cls tys
- | isExportedId id = Id.mkExportedLocalId name ty
+ | isExportedId id = Id.mkExportedLocalId VanillaId name ty
| otherwise = Id.mkLocalId name ty
; return id'
}
@@ -91,8 +92,8 @@ newExportedVar occ_name ty
u <- liftDs newUnique
let name = mkExternalName u mod occ_name noSrcSpan
-
- return $ Id.mkExportedLocalId name ty
+
+ return $ Id.mkExportedLocalId VanillaId name ty
-- |Make a fresh local variable with the given type.
-- The variable's name is formed using the given string as the prefix.
diff --git a/compiler/vectorise/Vectorise/Type/Env.hs b/compiler/vectorise/Vectorise/Type/Env.hs
index 34008efbbd4d..6ee5ca6cd9e5 100644
--- a/compiler/vectorise/Vectorise/Type/Env.hs
+++ b/compiler/vectorise/Vectorise/Type/Env.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
-- Vectorise a modules type and class declarations.
--
-- This produces new type constructors and family instances top be included in the module toplevel
diff --git a/compiler/vectorise/Vectorise/Type/TyConDecl.hs b/compiler/vectorise/Vectorise/Type/TyConDecl.hs
index a8159b09f49a..37a07f710def 100644
--- a/compiler/vectorise/Vectorise/Type/TyConDecl.hs
+++ b/compiler/vectorise/Vectorise/Type/TyConDecl.hs
@@ -59,7 +59,6 @@ vectTyConDecl tycon name'
-- NB: 'buildClass' attaches new quantifiers and dictionaries to the method types
; cls' <- liftDs $
buildClass
- False -- include unfoldings on dictionary selectors
name' -- new name: "V:Class"
(tyConTyVars tycon) -- keep original type vars
(map (const Nominal) (tyConRoles tycon)) -- all role are N for safety
diff --git a/compiler/vectorise/Vectorise/Utils/Base.hs b/compiler/vectorise/Vectorise/Utils/Base.hs
index cb7b34e36a1e..7d4bae3046d0 100644
--- a/compiler/vectorise/Vectorise/Utils/Base.hs
+++ b/compiler/vectorise/Vectorise/Utils/Base.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE CPP #-}
+
module Vectorise.Utils.Base
( voidType
, newLocalVVar
diff --git a/configure.ac b/configure.ac
index 744cebdf3c7e..2e9958c39c11 100644
--- a/configure.ac
+++ b/configure.ac
@@ -13,7 +13,7 @@ dnl
# see what flags are available. (Better yet, read the documentation!)
#
-AC_INIT([The Glorious Glasgow Haskell Compilation System], [7.7], [glasgow-haskell-bugs@haskell.org], [ghc])
+AC_INIT([The Glorious Glasgow Haskell Compilation System], [7.9], [glasgow-haskell-bugs@haskell.org], [ghc])
# Set this to YES for a released version, otherwise NO
: ${RELEASE=NO}
@@ -34,7 +34,7 @@ fi
AC_SUBST([CONFIGURE_ARGS], [$ac_configure_args])
dnl ----------------------------------------------------------
-dnl ** Find unixy sort and find commands,
+dnl ** Find unixy sort and find commands,
dnl ** which are needed by FP_SETUP_PROJECT_VERSION
dnl ** Find find command (for Win32's benefit)
@@ -91,7 +91,7 @@ AC_ARG_WITH([ghc],
WithGhc="$GHC"])
dnl ** Tell the make system which OS we are using
-dnl $OSTYPE is set by the operating system to "msys" or "cygwin" or something
+dnl $OSTYPE is set by the operating system to "msys" or "cygwin" or something
AC_SUBST(OSTYPE)
AC_ARG_ENABLE(bootstrap-with-devel-snapshot,
@@ -162,6 +162,11 @@ FP_COMPARE_VERSIONS([$GhcVersion],[-gt],[7.7],
CMM_SINK_BOOTSTRAP_IS_NEEDED=NO)
AC_SUBST(CMM_SINK_BOOTSTRAP_IS_NEEDED)
+FP_COMPARE_VERSIONS([$GhcVersion],[-lt],[7.9],
+ SUPPORTS_PACKAGE_KEY=NO,
+ SUPPORTS_PACKAGE_KEY=YES)
+AC_SUBST(SUPPORTS_PACKAGE_KEY)
+
# GHC is passed to Cabal, so we need a native path
if test "${WithGhc}" != ""
then
@@ -228,7 +233,7 @@ case $host in
# here we go with the test
MINOR=`uname -r|cut -d '.' -f 2-`
if test "$MINOR" -lt "11"; then
- SOLARIS_BROKEN_SHLD=YES
+ SOLARIS_BROKEN_SHLD=YES
fi
;;
esac
@@ -367,7 +372,7 @@ AS_IF([test "x$with_system_libffi" = "xyes"],
AC_SUBST(UseSystemLibFFI)
AC_ARG_WITH([ffi-includes],
-[AC_HELP_STRING([--with-ffi-includes=ARG]
+[AC_HELP_STRING([--with-ffi-includes=ARG],
[Find includes for libffi in ARG [default=system default]])
],
[
@@ -382,7 +387,7 @@ AC_ARG_WITH([ffi-includes],
AC_SUBST(FFIIncludeDir)
AC_ARG_WITH([ffi-libraries],
-[AC_HELP_STRING([--with-ffi-libraries=ARG]
+[AC_HELP_STRING([--with-ffi-libraries=ARG],
[Find libffi in ARG [default=system default]])
],
[
@@ -474,6 +479,66 @@ FIND_GCC([WhatGccIsCalled], [gcc], [gcc])
CC="$WhatGccIsCalled"
export CC
+# If --with-gcc was used, and we're not cross-compiling, then it also
+# applies to the stage0 compiler.
+MAYBE_OVERRIDE_STAGE0([gcc],[CC_STAGE0])
+MAYBE_OVERRIDE_STAGE0([ar],[AR_STAGE0])
+
+dnl ** what cpp to use?
+dnl --------------------------------------------------------------
+AC_ARG_WITH(hs-cpp,
+[AC_HELP_STRING([--with-hs-cpp=ARG],
+ [Use ARG as the path to cpp [default=autodetect]])],
+[
+ if test "$HostOS" = "mingw32"
+ then
+ AC_MSG_WARN([Request to use $withval will be ignored])
+ else
+ HaskellCPPCmd=$withval
+ fi
+],
+[
+ HaskellCPPCmd=$WhatGccIsCalled
+]
+)
+
+
+
+dnl ** what cpp flags to use?
+dnl -----------------------------------------------------------
+AC_ARG_WITH(hs-cpp-flags,
+ [AC_HELP_STRING([--with-hs-cpp-flags=ARG],
+ [Use ARG as the path to hs cpp [default=autodetect]])],
+ [
+ if test "$HostOS" = "mingw32"
+ then
+ AC_MSG_WARN([Request to use $withval will be ignored])
+ else
+ HaskellCPPArgs=$withval
+ fi
+ ],
+[
+ $HaskellCPPCmd -x c /dev/null -dM -E > conftest.txt 2>&1
+ if grep "__clang__" conftest.txt >/dev/null 2>&1; then
+ HaskellCPPArgs="-E -undef -traditional -Wno-invalid-pp-token -Wno-unicode -Wno-trigraphs "
+ else
+ $HaskellCPPCmd -v > conftest.txt 2>&1
+ if grep "gcc" conftest.txt >/dev/null 2>&1; then
+ HaskellCPPArgs="-E -undef -traditional "
+ else
+ $HaskellCPPCmd --version > conftest.txt 2>&1
+ if grep "cpphs" conftest.txt >/dev/null 2>&1; then
+ HaskellCPPArgs="--cpp -traditional"
+ else
+ AC_MSG_WARN([configure can't recognize your CPP program, you may need to set --with-hs-cpp-flags=FLAGS explicitly])
+ HaskellCPPArgs=""
+ fi
+ fi
+ fi
+ ]
+)
+
+
dnl ** Which ld to use?
dnl --------------------------------------------------------------
FP_ARG_WITH_PATH_GNU_PROG([LD], [ld], [ld])
@@ -486,6 +551,20 @@ FP_ARG_WITH_PATH_GNU_PROG([NM], [nm], [nm])
NmCmd="$NM"
AC_SUBST([NmCmd])
+dnl ** Which ar to use?
+dnl --------------------------------------------------------------
+FP_ARG_WITH_PATH_GNU_PROG([AR], [ar], [ar])
+ArCmd="$AR"
+fp_prog_ar="$AR"
+AC_SUBST([ArCmd])
+
+dnl ** Which ranlib to use?
+dnl --------------------------------------------------------------
+FP_ARG_WITH_PATH_GNU_PROG([RANLIB], [ranlib], [ranlib])
+RanlibCmd="$RANLIB"
+RANLIB="$RanlibCmd"
+
+
# Note: we may not have objdump on OS X, and we only need it on Windows (for DLL checks)
case $HostOS_CPP in
cygwin32|mingw32)
@@ -569,8 +648,8 @@ FP_PROG_LD_FILELIST
FPTOOLS_SET_C_LD_FLAGS([target],[CFLAGS],[LDFLAGS],[IGNORE_LINKER_LD_FLAGS],[CPPFLAGS])
FPTOOLS_SET_C_LD_FLAGS([build],[CONF_CC_OPTS_STAGE0],[CONF_GCC_LINKER_OPTS_STAGE0],[CONF_LD_LINKER_OPTS_STAGE0],[CONF_CPP_OPTS_STAGE0])
FPTOOLS_SET_C_LD_FLAGS([target],[CONF_CC_OPTS_STAGE1],[CONF_GCC_LINKER_OPTS_STAGE1],[CONF_LD_LINKER_OPTS_STAGE1],[CONF_CPP_OPTS_STAGE1])
-# Stage 3 won't be supported by cross-compilation
FPTOOLS_SET_C_LD_FLAGS([target],[CONF_CC_OPTS_STAGE2],[CONF_GCC_LINKER_OPTS_STAGE2],[CONF_LD_LINKER_OPTS_STAGE2],[CONF_CPP_OPTS_STAGE2])
+# Stage 3 won't be supported by cross-compilation
FP_GCC_EXTRA_FLAGS
@@ -604,9 +683,6 @@ chmod +x install-sh
dnl ** figure out how to do a BSD-ish install
AC_PROG_INSTALL
-dnl If you can run configure, you certainly have /bin/sh
-AC_DEFINE([HAVE_BIN_SH], [1], [Define to 1 if you have /bin/sh.])
-
dnl ** how to invoke `ar' and `ranlib'
FP_PROG_AR_SUPPORTS_ATFILE
FP_PROG_AR_NEEDS_RANLIB
@@ -744,30 +820,6 @@ FP_CHECK_FUNC([WinExec],
FP_CHECK_FUNC([GetModuleFileName],
[@%:@include ], [GetModuleFileName((HMODULE)0,(LPTSTR)0,0)])
-dnl ** check return type of signal handlers
-dnl Foo: assumes we can use prototypes.
-dnl On BCC, signal handlers have type "int(void)", elsewhere its "void(int)".
-dnl AC_CACHE_CHECK([type of signal handlers], ac_cv_type_signal_handler,
-dnl [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include
-dnl #include
-dnl #ifdef signal
-dnl #undef signal
-dnl #endif
-dnl void (*signal (int, void (*)(int)))(int);
-dnl ]],
-dnl [[int i;]])],
-dnl [ac_cv_type_signal_handler=void_int],
-dnl [ac_cv_type_signal_handler=int_void])])
-dnl if test "$ac_cv_type_signal_handler" = void_int; then
-dnl AC_DEFINE(VOID_INT_SIGNALS)
-dnl fi
-
-dnl On BCC, signal handlers have type "int(void)", elsewhere its "void(int)".
-AC_TYPE_SIGNAL
-if test "$ac_cv_type_signal" = void; then
- AC_DEFINE([VOID_INT_SIGNALS], [1], [Define to 1 if signal handlers have type void (*)(int). Otherwise, they're assumed to have type int (*)(void).])
-fi
-
dnl ** check for more functions
dnl ** The following have been verified to be used in ghc/, but might be used somewhere else, too.
AC_CHECK_FUNCS([getclock getrusage gettimeofday setitimer siginterrupt sysconf times ctime_r sched_setaffinity setlocale])
@@ -861,7 +913,21 @@ dnl ** check for eventfd which is needed by the I/O manager
AC_CHECK_HEADERS([sys/eventfd.h])
AC_CHECK_FUNCS([eventfd])
-# checking for PAPI
+dnl ** Check for __thread support in the compiler
+AC_MSG_CHECKING(for __thread support)
+AC_COMPILE_IFELSE(
+ [ AC_LANG_SOURCE([[__thread int tester = 0;]]) ],
+ [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE([CC_SUPPORTS_TLS],[1],[Define to 1 if __thread is supported])
+ ],
+ [
+ AC_MSG_RESULT(no)
+ AC_DEFINE([CC_SUPPORTS_TLS],[0],[Define to 1 if __thread is supported])
+ ])
+
+
+dnl ** checking for PAPI
AC_CHECK_LIB(papi, PAPI_library_init, HavePapiLib=YES, HavePapiLib=NO)
AC_CHECK_HEADER([papi.h], [HavePapiHeader=YES], [HavePapiHeader=NO])
AC_SUBST(HavePapiLib)
@@ -876,6 +942,21 @@ else
fi
AC_SUBST(HavePapi)
+# Check for perf_event (only more current Linux)
+AC_CHECK_HEADER([linux/perf_event.h], [HavePerfEvent=YES], [HavePerfEvent=NO])
+AC_SUBST(HavePerfEvent)
+
+# Check for libdwarf
+AC_CHECK_HEADER([libdwarf.h], [HaveDwarfHeader=YES], [HaveLibDwarfHeader=NO])
+AC_CHECK_LIB(dwarf, dwarf_init, [HaveDwarfLib=YES], [HaveDwarfLib=NO])
+AC_CHECK_LIB(elf, elf_begin, [HaveElfLib=YES], [HaveElfLib=NO])
+if test "$HaveDwarfLib" = "YES" -a "$HaveDwarfHeader" = "YES" -a "$HaveElfLib" = "YES"; then
+ HaveLibDwarf=YES
+else
+ HaveLibDwarf=NO
+fi
+AC_SUBST(HaveLibDwarf)
+
if test "$HAVE_DOCBOOK_XSL" = "NO" ||
test "$XsltprocCmd" = ""
then
@@ -958,13 +1039,14 @@ echo ["\
Using $CompilerName : $WhatGccIsCalled
which is version : $GccVersion
Building a cross compiler : $CrossCompiling
-
- ld : $LdCmd
- Happy : $HappyCmd ($HappyVersion)
- Alex : $AlexCmd ($AlexVersion)
- Perl : $PerlCmd
- dblatex : $DblatexCmd
- xsltproc : $XsltprocCmd
+ cpp : $HaskellCPPCmd
+ cpp-flags : $HaskellCPPArgs
+ ld : $LdCmd
+ Happy : $HappyCmd ($HappyVersion)
+ Alex : $AlexCmd ($AlexVersion)
+ Perl : $PerlCmd
+ dblatex : $DblatexCmd
+ xsltproc : $XsltprocCmd
Using LLVM tools
llc : $LlcCmd
diff --git a/distrib/compare/FilenameDescr.hs b/distrib/compare/FilenameDescr.hs
index a0f53fd2a0df..bf2a50ec9bda 100644
--- a/distrib/compare/FilenameDescr.hs
+++ b/distrib/compare/FilenameDescr.hs
@@ -10,7 +10,7 @@ import Utils
import Tar
-- We can't just compare plain filenames, because versions numbers of GHC
--- and the libaries will vary. So we use FilenameDescr instead, which
+-- and the libraries will vary. So we use FilenameDescr instead, which
-- abstracts out the version numbers.
type FilenameDescr = [FilenameDescrBit]
data FilenameDescrBit = VersionOf String
diff --git a/distrib/compare/Makefile b/distrib/compare/Makefile
index f65c0419eb77..49645783e20b 100644
--- a/distrib/compare/Makefile
+++ b/distrib/compare/Makefile
@@ -2,7 +2,7 @@
GHC = ghc
compare: *.hs
- "$(GHC)" -O --make -Wall -Werror $@
+ "$(GHC)" -O -XHaskell2010 --make -Wall -Werror $@
.PHONY: clean
clean:
diff --git a/distrib/compare/compare.hs b/distrib/compare/compare.hs
index 81055c2826d9..8653e3f6aa2d 100644
--- a/distrib/compare/compare.hs
+++ b/distrib/compare/compare.hs
@@ -1,5 +1,3 @@
-{-# LANGUAGE PatternGuards #-}
-
module Main (main) where
import Control.Monad.State
diff --git a/distrib/configure.ac.in b/distrib/configure.ac.in
index 4a6944fe17c9..c7a8ead9b0b2 100644
--- a/distrib/configure.ac.in
+++ b/distrib/configure.ac.in
@@ -63,6 +63,65 @@ FIND_GCC([WhatGccIsCalled], [gcc], [gcc])
CC="$WhatGccIsCalled"
export CC
+
+dnl ** what cpp to use?
+dnl --------------------------------------------------------------
+AC_ARG_WITH(hs-cpp,
+[AC_HELP_STRING([--with-hs-cpp=ARG],
+ [Use ARG as the path to cpp [default=autodetect]])],
+[
+ if test "$HostOS" = "mingw32"
+ then
+ AC_MSG_WARN([Request to use $withval will be ignored])
+ else
+ HaskellCPPCmd=$withval
+ fi
+],
+[
+ if test "$HostOS" != "mingw32"
+ then
+ HaskellCPPCmd=$WhatGccIsCalled
+ fi
+]
+)
+
+
+
+dnl ** what cpp flags to use?
+dnl -----------------------------------------------------------
+AC_ARG_WITH(hs-cpp-flags,
+ [AC_HELP_STRING([--with-hs-cpp-flags=ARG],
+ [Use ARG as the path to hs cpp [default=autodetect]])],
+ [
+ if test "$HostOS" = "mingw32"
+ then
+ AC_MSG_WARN([Request to use $withval will be ignored])
+ else
+ HaskellCPPArgs=$withval
+ fi
+ ],
+[
+ $HaskellCPPCmd -x c /dev/null -dM -E > conftest.txt 2>&1
+ if grep "__clang__" conftest.txt >/dev/null 2>&1; then
+ HaskellCPPArgs="-E -undef -traditional -Wno-invalid-pp-token -Wno-unicode -Wno-trigraphs "
+ else
+ $HaskellCPPCmd -v > conftest.txt 2>&1
+ if grep "gcc" conftest.txt >/dev/null 2>&1; then
+ HaskellCPPArgs="-E -undef -traditional "
+ else
+ $HaskellCPPCmd --version > conftest.txt 2>&1
+ if grep "cpphs" conftest.txt >/dev/null 2>&1; then
+ HaskellCPPArgs="--cpp -traditional"
+ else
+ AC_MSG_WARN([configure can't recognize your CPP program, you may need to set --with-hs-cpp-flags=FLAGS explicitly])
+ HaskellCPPArgs=""
+ fi
+ fi
+ fi
+ ]
+)
+
+
dnl ** Which ld to use?
dnl --------------------------------------------------------------
FP_ARG_WITH_PATH_GNU_PROG([LD], [ld], [ld])
@@ -114,6 +173,7 @@ AC_SUBST(WordSize)
#
dnl ** how to invoke `ar' and `ranlib'
#
+FP_PROG_AR_SUPPORTS_ATFILE
FP_PROG_AR_NEEDS_RANLIB
FP_SETTINGS
diff --git a/distrib/remilestoning.pl b/distrib/remilestoning.pl
index a544ddaf1792..60a23af518dd 100644
--- a/distrib/remilestoning.pl
+++ b/distrib/remilestoning.pl
@@ -1,5 +1,6 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
+use warnings;
use strict;
use DBI;
diff --git a/docs/backpack/.gitignore b/docs/backpack/.gitignore
new file mode 100644
index 000000000000..c3eb46ecd66c
--- /dev/null
+++ b/docs/backpack/.gitignore
@@ -0,0 +1,10 @@
+*.aux
+*.bak
+*.bbl
+*.blg
+*.dvi
+*.fdb_latexmk
+*.fls
+*.log
+*.synctex.gz
+backpack-impl.pdf
diff --git a/docs/backpack/Makefile b/docs/backpack/Makefile
new file mode 100644
index 000000000000..0dd7a9dad511
--- /dev/null
+++ b/docs/backpack/Makefile
@@ -0,0 +1,2 @@
+backpack-impl.pdf: backpack-impl.tex
+ latexmk -pdf -latexoption=-halt-on-error -latexoption=-file-line-error -latexoption=-synctex=1 backpack-impl.tex && touch paper.dvi || ! rm -f $@
diff --git a/docs/backpack/arch.png b/docs/backpack/arch.png
new file mode 100644
index 000000000000..d8b8fd21f9ec
Binary files /dev/null and b/docs/backpack/arch.png differ
diff --git a/docs/backpack/backpack-impl.bib b/docs/backpack/backpack-impl.bib
new file mode 100644
index 000000000000..6bda35a8ea0b
--- /dev/null
+++ b/docs/backpack/backpack-impl.bib
@@ -0,0 +1,17 @@
+@inproceedings{Kilpatrick:2014:BRH:2535838.2535884,
+ author = {Kilpatrick, Scott and Dreyer, Derek and Peyton Jones, Simon and Marlow, Simon},
+ title = {Backpack: Retrofitting Haskell with Interfaces},
+ booktitle = {Proceedings of the 41st ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
+ series = {POPL '14},
+ year = {2014},
+ isbn = {978-1-4503-2544-8},
+ location = {San Diego, California, USA},
+ pages = {19--31},
+ numpages = {13},
+ url = {http://doi.acm.org/10.1145/2535838.2535884},
+ doi = {10.1145/2535838.2535884},
+ acmid = {2535884},
+ publisher = {ACM},
+ address = {New York, NY, USA},
+ keywords = {applicative instantiation, haskell modules, mixin modules, module systems, packages, recursive modules, separate modular development, type systems},
+}
diff --git a/docs/backpack/backpack-impl.tex b/docs/backpack/backpack-impl.tex
new file mode 100644
index 000000000000..963c53c50b51
--- /dev/null
+++ b/docs/backpack/backpack-impl.tex
@@ -0,0 +1,2483 @@
+\documentclass{article}
+
+\usepackage{pifont}
+\usepackage{graphicx} %[pdftex] OR [dvips]
+\usepackage{fullpage}
+\usepackage{wrapfig}
+\usepackage{float}
+\usepackage{titling}
+\usepackage{hyperref}
+\usepackage{tikz}
+\usepackage{color}
+\usepackage{footnote}
+\usepackage{float}
+\usepackage{algorithm}
+\usepackage{algpseudocode}
+\usetikzlibrary{arrows}
+\usetikzlibrary{positioning}
+\setlength{\droptitle}{-6em}
+
+\input{commands-new-new.tex}
+
+\newcommand{\nuAA}{\nu_\mathit{AA}}
+\newcommand{\nuAB}{\nu_\mathit{AB}}
+\newcommand{\nuGA}{\nu_\mathit{GA}}
+\newcommand{\nuGB}{\nu_\mathit{GB}}
+\newcommand{\betaPL}{\beta_\mathit{PL}}
+\newcommand{\betaAA}{\beta_\mathit{AA}}
+\newcommand{\betaAS}{\beta_\mathit{AS}}
+\newcommand{\thinandalso}{\hspace{.45cm}}
+\newcommand{\thinnerandalso}{\hspace{.38cm}}
+
+\input{commands-rebindings.tex}
+
+\newcommand{\var}[1]{\textsf{#1}}
+
+\newcommand{\ghcfile}[1]{\textsl{#1}}
+
+\title{Implementing Backpack}
+
+\begin{document}
+
+\maketitle
+
+The purpose of this document is to describe an implementation path
+for Backpack in GHC\@.
+
+\tableofcontents
+
+\section{What we are trying to solve}
+
+While the current ecosystem has proved itself serviceable for many years,
+there are a number of major problems which cause significant headaches
+for many users. Here are some of them:
+
+\subsection{Package reinstalls are destructive}\label{sec:destructive}
+
+When attempting to install a new package, you might get an error like
+this:
+
+\begin{verbatim}
+$ cabal install hakyll
+cabal: The following packages are likely to be broken by the reinstalls:
+pandoc-1.9.4.5
+Graphalyze-0.14.0.0
+Use --force-reinstalls if you want to install anyway.
+\end{verbatim}
+
+While this error message is understandable if you're really trying to
+reinstall a package, it is quite surprising that it can occur even if
+you didn't ask for any reinstalls!
+
+The underlying cause of this problem is related to an invariant Cabal
+currently enforces on a package database: there can only be one instance
+of a package for any given package name and version. This means that it
+is not possible to install a package multiple times, compiled against
+different dependencies. However, sometimes, reinstalling a package with
+different dependencies is the only way to fulfill version bounds of a
+package! For example: say we have three packages \pname{a}, \pname{b}
+and \pname{c}. \pname{b-1.0} is the only version of \pname{b}
+available, and it has been installed and compiled against \pname{c-1.0}.
+Later, the user installs an updated version \pname{c-1.1} and then
+attempts to install \pname{a}, which depends on the specific versions
+\pname{c-1.1} and \pname{b-1.0}. We \emph{cannot} use the already
+installed version of \pname{b-1.0}, which depends on the wrong version
+of \pname{c}, so our only choice is to reinstall \pname{b-1.0} compiled
+against \pname{c-1.1}. This will break any packages, e.g. \pname{d},
+which were built against the old version of \pname{b-1.0}.
+
+Our solution to this problem is to \emph{abolish} destructive package
+installs, and allow a package to be installed multiple times with the same
+package name and version. However, allowing this poses some interesting
+user interface problems, since package IDs are now no longer unambiguous
+identifiers.
+
+\subsection{Version bounds are often over/under-constrained}
+
+When attempting to install a new package, Cabal might fail in this way:
+
+\begin{verbatim}
+$ cabal install hledger-0.18
+Resolving dependencies...
+cabal: Could not resolve dependencies:
+# pile of output
+\end{verbatim}
+
+There are a number of possible reasons why this could occur, but usually
+it's because some of the packages involved have over-constrained version
+bounds, which are resulting in an unsatisfiable set of constraints (or,
+at least, Cabal gave up backtracking before it found a solution.) To
+add insult to injury, most of the time the bound is nonsense and removing
+it would result in a working compilation. In fact, this situation is
+so common that Cabal has a flag \verb|--allow-newer| which lets you
+override the package upper bounds.
+
+However, the flip-side is when Cabal finds a satisfying set, but your
+compilation fails with a type error. Here, you had an under-constrained
+set of version bounds which didn't actually reflect the compatible
+versions of a package, and Cabal picked a version of the package which
+was incompatible.
+
+Our solution to this problem is to use signatures instead of version
+numbers as the primary mechanism by which compatibility is determined:
+e.g., if it typechecks, it's a valid choice. Version numbers can still
+be used to reflect semantic changes not seen in the types (in
+particular, ruling out buggy versions of a package is a useful
+operation), but these bounds are empirical observations and can be
+collected after-the-fact.
+
+\subsection{It is difficult to support multiple implementations of a type}
+
+This problem is perhaps best described by referring to a particular
+instance of it Haskell's ecosystem: the \texttt{String} data type. Haskell,
+by default, implements strings as linked lists of integers (representing
+characters). Many libraries use \texttt{String}, because it's very
+convenient to program against. However, this representation is also
+very \emph{slow}, so there are alternative implementations such as
+\texttt{Text} which implement efficient, UTF-8 encoded packed byte
+arrays.
+
+Now, suppose you are writing a library and you don't care if the user of
+your library is using \texttt{String} or \texttt{Text}. However, you
+don't want to rewrite your library twice to support both data types:
+rather, you'd like to rely on some \emph{common interface} between the
+two types, and let the user instantiate the implementation. The only
+way to do this in today's Haskell is using type classes; however, this
+necessitates rewriting all type signatures from a nice \texttt{String ->
+String} to \texttt{StringLike s => s -> s}. The result is less readable,
+required a large number of trivial edits to type signatures, and might
+even be less efficient, if GHC does not appropriately specialize your code
+written in this style.
+
+Our solution to this problem is to introduce a new mechanism of
+pluggability: module holes, which let us use types and functions from a
+module \texttt{Data.String} as before, but defer choosing \emph{what}
+module should be used in the implementation to some later point (or
+instantiate the code multiple times with different choices.)
+
+\subsection{Fast moving APIs are difficult to develop/develop against}
+
+Most packages that are uploaded to Hackage have package authors who pay
+some amount of attention to backwards compatibility and avoid making egregious
+breaking changes. However, a package like the \verb|ghc-api| follows a
+very different model: the library is treated by its developers as an
+internal component of an application (GHC), and is frequently refactored
+in a way that changes its outwards facing interface.
+
+Arguably, an application like GHC should design a stable API and
+maintain backwards compatibility against it. However, this is a lot of
+work (including refactoring) which is only being done slowly, and in the
+meantime, the dump of all the modules gives users the functionality they
+want (even if it keeps breaking every version.)
+
+One could say that the core problem is there is no way for users to
+easily communicate to GHC authors what parts of the API they rely on. A
+developer of GHC who is refactoring an interface will often rely on the
+typechecker to let them know which parts of the codebase they need to
+follow and update, and often could say precisely how to update code to
+use the new interface. User applications, which live out of tree, don't
+receive this level of attention.
+
+Our solution is to make it possible to typecheck the GHC API against a
+signature. Important consumers can publish what subsets of the GHC API
+they rely on, and developers of GHC, as part of their normal build
+process, type-check against these signatures. If the signature breaks,
+a developer can either do the refactoring differently to avoid the
+compatibility-break, or document how to update code to use the new API\@.
+
+\section{Backpack in a nutshell}
+
+For a more in-depth tutorial about Backpack's features, check out Section 2
+of the original Backpack paper. In this section, we briefly review the
+most important points of Backpack's design.
+
+\paragraph{Thinning and renaming at the module level}
+A user can specify a build dependency which only exposes a subset of
+modules (possibly under different names.) By itself, it's a way for the
+user to resolve ambiguous module imports at the package level, without
+having to use the \texttt{PackageImports} syntax extension.
+
+\paragraph{Holes (abstract module definitions)} The core
+component of Backpack's support for \emph{separate modular development}
+is the ability to specify abstract module bindings, or holes, which give
+users of the module an obligation to provide an implementation which
+fulfills the signature of the hole. In this example:
+
+\begin{verbatim}
+package p where
+ A :: [ ... ]
+ B = [ import A; ... ]
+\end{verbatim}
+
+\verb|p| is an \emph{indefinite package}, which cannot be compiled until
+an implementation of \m{A} is provided. However, we can still type check
+\m{B} without any implementation of \m{A}, by type checking it against
+the signature. Holes can be put into signature packages and included
+(depended upon) by other packages to reuse definitions of signatures.
+
+\paragraph{Filling in holes with an implementation}
+A hole in an indefinite package can be instantiated in a \emph{mix-in}
+style: namely, if a signature and an implementation have the same name,
+they are linked together:
+
+\begin{verbatim}
+package q where
+ A = [ ... ]
+ include p -- has signature A
+\end{verbatim}
+
+Renaming is often useful to rename a module (or a hole) so that a signature
+and implementation have the same name and are linked together.
+An indefinite package can be instantiated multiple times with different
+implementations: the \emph{applicativity} of Backpack means that if
+a package is instantiated separately with the same module, the results
+are type equal:
+
+\begin{verbatim}
+package q' where
+ A = [ ... ]
+ include p (A, B as B1)
+ include p (A, B as B2)
+ -- B1 and B2 are equivalent
+\end{verbatim}
+
+\paragraph{Combining signatures together}
+Unlike implementations, it's valid for multiple signatures with the
+same name to be in scope.
+
+\begin{verbatim}
+package a-sig where
+ A :: [ ... ]
+package a-sig2 where
+ A :: [ ... ]
+package q where
+ include a-sig
+ include a-sig2
+ B = [ import A; ... ]
+\end{verbatim}
+
+These signatures \emph{merge} together, providing the union of the
+functionality (assuming the types of individual entities are
+compatible.) Backpack has a very simple merging algorithm: types must
+match exactly to be compatible (\emph{width} subtyping).
+
+\clearpage
+
+\section{Module and package identity}
+
+\begin{figure}[H]
+\begin{tabular}{p{0.45\textwidth} p{0.45\textwidth}}
+\begin{verbatim}
+package p where
+ A :: [ data X ]
+ P = [ import A; data Y = Y X ]
+package q where
+ A1 = [ data X = X1 ]
+ A2 = [ data X = X2 ]
+ include p (A as A1, P as P1)
+ include p (A as A2, P as P2)
+\end{verbatim}
+&
+\begin{verbatim}
+package p where
+ A :: [ data X ]
+ P = [ data T = T ] -- no A import!
+package q where
+ A1 = [ data X = X1 ]
+ A2 = [ data X = X2 ]
+ include p (A as A1, P as P1)
+ include p (A as A2, P as P2)
+\end{verbatim}
+\\
+(a) Type equality must consider holes\ldots &
+(b) \ldots but how do we track dependencies? \\
+\end{tabular}
+\caption{Two similar examples}\label{fig:simple-ex}
+\end{figure}
+
+One of the central questions one encounters when type checking Haskell
+code is: \emph{when are two types equal}? In ordinary Haskell, the
+answer is simple: ``They are equal if their \emph{original names} (i.e.,
+where they were originally defined) are the same.'' However, in
+Backpack, the situation is murkier due to the presence of \emph{holes}.
+Consider the pair of examples in Figure~\ref{fig:simple-ex}.
+In Figure~\ref{fig:simple-ex}a, the types \m{P1}.\verb|Y| and \m{P2}.\verb|Y| should not be
+considered equal, even though na\"\i vely their original names are
+\pname{p}:\m{P}.\verb|Y|, since their arguments are different \verb|X|'s!
+On the other hand, if we instantiated \pname{p} twice with the same \m{A}
+(e.g., change the second include to \texttt{include p (A as A1, P as P2)}),
+we might consider the two resulting \verb|Y|'s
+equal, an \emph{applicative} semantics of identity instantiation. In
+Figure~\ref{fig:simple-ex}b, we see that even though \m{A} was instantiated differently,
+we might reasonably wonder if \texttt{T} should still be considered the same,
+since it has no dependence on the actual choice of \m{A}.
+
+In fact, there are quite a few different choices that can be made here.
+Figures~\ref{fig:applicativity}~and~\ref{fig:granularity} summarize the various
+choices on two axes: the granularity of applicativity (under what circumstances
+do we consider two types equal) and the granularity of dependency (what circumstances
+do we consider two types not equal)? A \ding{52} means the design we have chosen
+answers the question affirmatively---\ding{54}, negatively---but all of these choices
+are valid points on the design space.
+
+\subsection{The granularity of applicativity}
+
+An applicative semantics of package instantiation states that if a package is
+instantiated with the ``same arguments'', then the resulting entities it defines
+should also be considered equal. Because Backpack uses \emph{mix-in modules},
+it is very natural to consider the arguments of a package instantiation as the
+modules, as shown in Figure~\ref{fig:applicativity}b: the same module \m{A} is
+linked for both instantiations, so \m{P1} and \m{P2} are considered equal.
+
+However, if we consider the situation at a finer granularity, we might say, ``Well,
+for a declaration \texttt{data Y = Y X}, only the definition of type \verb|X| matters.
+If they are the same, then \verb|Y| is the same.'' In that case, we might accept
+that in Figure~\ref{fig:applicativity}a, even though \pname{p} is instantiated
+with different modules, at the end of the day, the important component \verb|X| is
+the same in both cases, so \verb|Y| should also be the same. This is a sort of
+``extreme'' view of modular development, where every declaration is desugared
+into a separate module. In our design, we will be a bit more conservative, and
+continue with module level applicativity, in the same manner as Paper Backpack.
+
+\paragraph{Implementation considerations}
+Compiling Figure~\ref{fig:applicativity}b to dynamic libraries poses an
+interesting challenge, if every package compiles to a dynamic library.
+When we compile package \pname{q}, the libraries we end up producing are \pname{q}
+and an instance of \pname{p} (instantiated with \pname{q}:\m{A}). Furthermore,
+\pname{q} refers to code in \pname{p} (the import in \m{Q}), and vice versa (the usage
+of the instantiated hole \m{A}). When building static libraries, this circular
+dependency doesn't matter: when we link the executable, we can resolve all
+of the symbols in one go. However, when the libraries in question are
+dynamic libraries \verb|libHSq.so| and \verb|libHSp(q:A).so|, we now have
+a \emph{circular dependency} between the two dynamic libraries, and most dynamic
+linkers will not be able to load either of these libraries.
+
+To break the circularity in Figure~\ref{fig:applicativity}b, we have to \emph{inline}
+the entire module \m{A} into the instance of \pname{p}. Since the code is exactly
+the same, we can still consider the instance of \m{A} in \pname{q} and in \pname{p}
+type equal. However, in Figure~\ref{fig:applicativity}c, applicativity has been
+done at a coarser level: although we are using Backpack's module mixin syntax,
+morally, this example is filling in the holes with the \emph{package} \pname{a}
+(rather than a module). In this case, we can achieve code sharing, since
+\pname{p} can refer directly to \pname{a}, breaking the circularity.
+
+\newcolumntype{C}{>{\centering\arraybackslash}p{0.3\textwidth}}
+ \begin{savenotes}
+\begin{figure}
+ \begin{tabular}{C C C}
+\begin{verbatim}
+package q where
+ A = [ data X = X ]
+ A1 = [ import A; x = 0 ]
+ A2 = [ import A; x = 1 ]
+ include p (A as A1, P as P1)
+ include p (A as A2, P as P2)
+ Q = [ import P1; ... ]
+\end{verbatim}
+&
+\begin{verbatim}
+package q where
+ A = [ data X = X ]
+
+
+ include p (A, P as P1)
+ include p (A, P as P2)
+ Q = [ import P1; ... ]
+\end{verbatim}
+&
+\begin{verbatim}
+package a where
+ A = [ data X = X ]
+package q where
+ include a
+ include p (A, P as P1)
+ include p (A, P as P2)
+ Q = [ import P1; ... ]
+\end{verbatim}
+ \\
+ (a) Declaration applicativity \ding{54} &
+ (b) Module applicativity \ding{52} &
+ (c) Package applicativity \ding{52} \\
+\end{tabular}
+\caption{Choices of granularity of applicativity on \pname{p}: given \texttt{data Y = Y X}, is \m{P1}.\texttt{Y} equal to \m{P2}.\texttt{Y}?}\label{fig:applicativity}
+\end{figure}
+\end{savenotes}
+
+\subsection{The granularity of dependency}
+
+\begin{savenotes}
+\newcolumntype{C}{>{\centering\arraybackslash}p{0.3\textwidth}}
+\begin{figure}
+ \begin{tabular}{C C C}
+\begin{verbatim}
+package p(A,P) where
+ A :: [ data X ]
+ P = [
+ import A
+ data T = T
+ data Y = Y X
+ ]
+\end{verbatim}
+&
+\begin{verbatim}
+package p(A,P) where
+ A :: [ data X ]
+ B = [ data T = T ]
+ C = [
+ import A
+ data Y = Y X
+ ]
+ P = [
+ import B
+ import C
+ ]
+\end{verbatim}
+&
+\begin{verbatim}
+package b where
+ B = [ data T = T ]
+package c where
+ A :: [ data X ]
+ C = [
+ import A
+ data Y = Y X
+ ]
+package p(A,P) where
+ include b; include c
+ P = [ import B; import C ]
+\end{verbatim}
+ \\
+ (a) Declaration granularity \ding{54} &
+ (b) Module granularity \ding{54} &
+ (c) Package granularity \ding{52} \\
+\end{tabular}
+\caption{Choices of granularity for dependency: is the identity of \texttt{T} independent of how \m{A} is instantiated?}\label{fig:granularity}
+\end{figure}
+
+\end{savenotes}
+
+In the previous section, we considered \emph{what} entities may be considered for
+computing dependency; in this section we consider \emph{which} entities are actually
+considered as part of the dependencies for the declaration/module/package we're writing.
+Figure~\ref{fig:granularity} contains a series of examples which exemplify the choice
+of whether or not to collect dependencies on a per-declaration, per-module or per-package basis:
+
+\begin{itemize}
+ \item Package-level granularity states that the modules in a package are
+considered to depend on \emph{all} of the holes in the package, even if
+the hole is never imported. Figure~\ref{fig:granularity}c is factored so that
+\verb|T| is defined in a distinct package \pname{b} with no holes, so no matter
+the choice of \m{A}, \m{B}.\verb|T| will be the same. On the other hand, in
+Figure~\ref{fig:granularity}b, there is a hole in the package defining \m{B},
+so the identity of \verb|T| will depend on the choice of \m{A}.
+
+\item Module-level granularity states that each module has its own dependency,
+computed by looking at its import statements. In this setting, \verb|T| in Figure~\ref{fig:granularity}b
+is independent of \m{A}, since the hole is never imported in \m{B}. But once again, in
+Figure~\ref{fig:granularity}a, there is an import in the module defining \verb|T|,
+so the identity of \verb|T| once again depends on the choice of \m{A}.
+
+\item Finally, at the finest level of granularity, one could chop up \pname{p} in
+Figure~\ref{fig:granularity}a, looking at the type declaration-level dependency
+to suss out whether or not \verb|T| depends on \m{A}. It doesn't refer to
+anything in \m{A}, so it is always considered the same.
+\end{itemize}
+
+It is well worth noting that the system described by Paper Backpack tracks dependencies per module;
+however, we have decided that we will implement tracking per package instead:
+a coarser-grained granularity which accepts fewer programs.
+
+Is a finer form of granularity \emph{better?} Not necessarily! For
+one, we can always split packages into further subpackages (as was done
+in Figure~\ref{fig:granularity}c) which better reflect the internal hole
+dependencies, so it is always possible to rewrite a program to make it
+typecheck---just with more packages. Additionally, the finer the
+granularity of dependency, the more work I have to do to understand what
+the identities of entities in a module are. In Paper Backpack, I have
+to understand the imports of all modules in a package; with
+declaration-granularity, I have to understand the entire code. This is
+a lot of work for the developer to think about; a more granular model is
+easier to remember and reason about. Finally, package-level granularity
+is much easier to implement, as it preserves the previous compilation
+model, \emph{one library per package}. At a fine level of granularity, we
+may end up repeatedly compiling a module which actually should be considered
+``the same'' as any other instance of it.
+
+Nevertheless, finer granularity can be desirable from an end-user perspective.
+Usually, these circumstances arise when library-writers are forced to split their
+components into many separate packages, when they would much rather have written
+a single package. For example, if I define a data type in my library, and would
+like to define a \verb|Lens| instance for it, I would create a new package just
+for the instance, in order to avoid saddling users who aren't interested in lenses
+with an extra dependency. Another example is test suites, which have dependencies
+on various test frameworks that a user won't care about if they are not planning
+on testing the code. (Cabal has a special case for this, allowing the user
+to write effectively multiple packages in a single Cabal file.)
+
+\subsection{Summary}
+
+We can summarize all of the various schemes by describing the internal data
+types that would be defined by GHC under each regime. First, we have
+the shared data structures, which correspond closely to what users are
+used to seeing:
+
+\begin{verbatim}
+ <pkg-name> ::= containers, ...
+ <pkg-id> ::= <pkg-name>-<version>
+ <mod-name> ::= Data.Set, ...
+ <occ-name> ::= empty, ...
+\end{verbatim}
+
+Changing the \textbf{granularity of applicativity} modifies how we represent the
+list of dependencies associated with an entity. With module applicativity,
+we list module identities (not yet defined); with declaration applicativity
+we actually list the original names (i.e., ids).
+
+\begin{verbatim}
+ <deps> ::= <original-name>, ... # Declaration applicativity
+ <deps> ::= <module-id>, ... # Module applicativity
+\end{verbatim}
+
+Changing the \textbf{granularity of dependency} affects how we compute
+the lists of dependencies, and what entities are well defined:
+
+\begin{verbatim}
+# Package-level granularity
+ <pkg-key> ::= hash(<pkg-id> + <deps>)
+ <module-id> ::= <pkg-key>:<mod-name>
+ <original-name> ::= <module-id>.<occ-name>
+
+# Module-level granularity
+ <pkg-key> not defined
+ <module-id> ::= hash(<pkg-id>:<mod-name> + <deps>)
+ <original-name> ::= <module-id>.<occ-name>
+
+# Declaration-level granularity
+ <pkg-key> not defined
+ <module-id> not defined
+ <original-name> ::= hash(<pkg-id>:<mod-name>.<occ-name> + <deps>)
+\end{verbatim}
+
+Notice that as we increase the granularity, the notion of a ``package'' and a ``module''
+become undefined. This is because, for example, with module-level granularity, a single
+``package'' may result in several modules, each of which have different sets of
+dependencies. It doesn't make much sense to refer to the package as a monolithic entity,
+because the point of splitting up the dependencies was so that if a user relies only
+on a single module, it has a correspondingly restricted set of dependencies.
+\subsection{The new scheme, formally}
+
+\begin{wrapfigure}{R}{0.5\textwidth}
+\begin{myfig}
+\[
+\begin{array}{@{}lr@{\;}c@{\;}l@{}}
+ \text{Package Names (\texttt{PkgName})} & P &\in& \mathit{PkgNames} \\
+ \text{Module Path Names (\texttt{ModName})} & p &\in& \mathit{ModPaths} \\
+ \text{Module Identity Vars} & \alpha,\beta &\in& \mathit{IdentVars} \\
+ \text{Package Key (\texttt{PackageKey})} & \K &::=& P(\vec{p\mapsto\nu}) \\
+ \text{Module Identities (\texttt{Module})} & \nu &::=&
+ \alpha ~|~
+ \K\colon\! p \\
+ \text{Module Identity Substs} & \phi,\theta &::=&
+ \{\vec{\alpha \coloneqq \nu}\} \\
+\end{array}
+\]
+\caption{Module Identities}
+\label{fig:mod-idents}
+\end{myfig}
+\end{wrapfigure}
+
+In this section, we give a formal treatment of our choice in the design space, in the
+same style as the Backpack paper, but omitting mutual recursion, as it follows straightforwardly.
+Physical module
+identities $\nu$, the \texttt{Module} component of \emph{original names} in GHC, are either (1) \emph{variables} $\alpha$, which are
+used to represent holes\footnote{In practice, these will just be fresh paths in a special package key for variables.} or (2) a concrete module $p$ defined in package
+$P$, with holes instantiated with other module identities (might be
+empty)\footnote{In Paper Backpack, we would refer to just $P$:$p$ as the identity
+constructor. However, we've written the subterms specifically next to $P$ to highlight the semantic difference of these terms.}.
+
+As in traditional Haskell, every package contains a number of module
+files at some module path $p$; within a package these paths are
+guaranteed to be unique.\footnote{In Paper Backpack, the module expressions themselves are used to refer to globally unique identifiers for each literal. This makes the metatheory simpler, but for implementation purposes it is convenient to conflate the \emph{original} module path that a module is defined at with its physical identity.} When we write inline module definitions, we assume
+that they are immediately assigned to a module path $p$ which is incorporated
+into their identity. A module identity $\nu$ simply augments this
+with subterms $\vec{p\mapsto\nu}$ representing how \emph{all} holes in the package $P$
+were instantiated.\footnote{In Paper Backpack, we do not distinguish between holes/non-holes, and we consider all imports of the \emph{module}, not the package.} This naming is stable because the current Backpack surface syntax does not allow a logical path in a package
+to be undefined. A package key is $P(\vec{p\mapsto\nu})$.
+
+Here is the very first example from
+Section 2 of the original Backpack paper, \pname{ab-1}:
+
+\begin{example}
+\Pdef{ab-1}{
+ \Pmod{A}{x = True}
+ \Pmod{B}{\Mimp{A}; y = not x}
+ % \Pmodd{C}{\mname{A}}
+}
+\end{example}
+
+The identities of \m{A} and \m{B} are
+\pname{ab-1}:\mname{A} and \pname{ab-1}:\mname{B}, respectively.\footnote{In Paper Backpack, the identity for \mname{B} records its import of \mname{A}, but since it is definite, this is strictly redundant.} In a package with holes, each
+hole (within the package definition) gets a fresh variable as its
+identity, and all of the holes associated with package $P$ are recorded. Consider \pname{abcd-holes-1}:
+
+\begin{example}
+\Pdef{abcd-holes-1}{
+ \Psig{A}{x :: Bool} % chktex 26
+ \Psig{B}{y :: Bool} % chktex 26
+ \Pmod{C}{x = False}
+ \Pmodbig{D}{
+ \Mimpq{A}\\
+ \Mimpq{C}\\
+ % \Mexp{\m{A}.x, z}\\
+ z = \m{A}.x \&\& \m{C}.x
+ }
+}
+\end{example}
+
+The identities of the four modules
+are, in order, $\alpha_a$, $\alpha_b$, $\pname{abcd-holes-1}(\alpha_a,\alpha_b)$:\mname{C}, and
+$\pname{abcd-holes-1}(\alpha_a,\alpha_b)$:\mname{D}.\footnote{In Paper Backpack, the granularity is at the module level, so the subterms of \mname{C} and \mname{D} can differ.} We include both $\alpha_a$ and $\alpha_b$ in both \mname{C} and \mname{D}, regardless of the imports. When we link the package against an implementation of the hole, these variables are replaced with the identities of the modules we linked against.
+
+Shaping proceeds in the same way as in Paper Backpack, except that the
+shaping judgment must also accept the package key
+$P(\vec{p\mapsto\alpha})$ so we can create identifiers with
+\textsf{mkident}. This implies we must know ahead of time what the holes
+of a package are.
+
+\paragraph{A full Backpack comparison}
+If you're curious about how the rest of the Backpack examples translate,
+look no further than this section.
+
+First, consider the module identities in the \m{Graph} instantiations in
+\pname{multinst}, shown in Figure 2 of the original Backpack paper.
+In the definition of \pname{structures}, assume that the variables for
+\m{Prelude} and \m{Array} are $\alpha_P$ and $\alpha_A$ respectively.
+The identity of \m{Graph} is $\pname{structures}(\alpha_P, \alpha_A)$:\m{Graph}. Similarly, the identities of the two array implementations
+are $\nu_{AA} = \pname{arrays-a}(\alpha_P)$:\m{Array} and
+$\nu_{AB} = \pname{arrays-b}(\alpha_P)$:\m{Array}.\footnote{Notice that the subterms coincide with Paper Backpack! A sign that module level granularity is not necessary for many use-cases.}
+
+The package \pname{graph-a} is more interesting because it
+\emph{links} the packages \pname{arrays-a} and \pname{structures}
+together, with the implementation of \m{Array} from \pname{arrays-a}
+\emph{instantiating} the hole \m{Array} from \pname{structures}. This
+linking is reflected in the identity of the \m{Graph} module in
+\pname{graph-a}: whereas in \pname{structures} it was $\nu_G =
+\pname{structures}(\alpha_P, \alpha_A)$:\m{Graph}, in \pname{graph-a} it is
+$\nu_{GA} = \nu_G[\nu_{AA}/\alpha_A] = \pname{structures}(\alpha_P, \nu_{AA})$:\m{Graph}. Similarly, the identity of \m{Graph} in
+\pname{graph-b} is $\nu_{GB} = \nu_G[\nu_{AB}/\alpha_A] =
+\pname{structures}(\alpha_P, \nu_{AB})$:\m{Graph}. Thus, linking consists
+of substituting the variable identity of a hole by the concrete
+identity of the module filling that hole.
+
+Lastly, \pname{multinst} makes use of both of these \m{Graph}
+modules, under the aliases \m{GA} and \m{GB}, respectively.
+Consequently, in the \m{Client} module, \code{\m{GA}.G} and
+\code{\m{GB}.G} will be correctly viewed as distinct types since they
+originate in modules with distinct identities.
+
+As \pname{multinst} illustrates, module identities effectively encode
+dependency graphs at the package level.\footnote{In Paper Backpack, module identities
+encode dependency graphs at the module level. In both cases, however, what is being
+depended on is always a module.} Like in Paper Backpack, we have an \emph{applicative}
+semantics of instantiation, and the applicativity example in Figure 3 of the
+Backpack paper still type checks. However, because we are operating at a coarser
+granularity, modules may have spurious dependencies on holes that they don't
+actually depend on, which means less type equalities may hold.
+
+
+\subsection{Cabal dependency resolution}
+
+Currently, when we compile a Cabal
+package, Cabal goes ahead and resolves \verb|build-depends| entries with actual
+implementations, which we compile against. A planned addition to the package key,
+independent of Backpack, is to record the transitive dependency tree selected
+during this dependency resolution process, so that we can install \pname{libfoo-1.0}
+twice compiled against different versions of its dependencies.
+What is the relationship to this transitive dependency tree of \emph{packages},
+with the subterms of our package identities which are \emph{modules}? Does one
+subsume the other? In fact, these are separate mechanisms---two levels of indirections,
+so to speak.
+
+To illustrate, suppose I write a Cabal file with \verb|build-depends: foobar|. A reasonable assumption is that this translates into a
+Backpack package which has \verb|include foobar|. However, this is not
+actually a Paper Backpack package: Cabal's dependency solver has to
+rewrite all of these package references into versioned references
+\verb|include foobar-0.1|. For example, this is a pre-package:
+
+\begin{verbatim}
+package foo where
+ include bar
+\end{verbatim}
+
+and this is a Paper Backpack package:
+
+\begin{verbatim}
+package foo-0.3[bar-0.1[baz-0.2]] where
+ include bar-0.1[baz-0.2]
+\end{verbatim}
+
+This tree is very similar to the one tracking dependencies for holes,
+but we must record this tree \emph{even} when our package has no holes.
+% As a final example, the full module
+% identity of \m{B1} in Figure~\ref{fig:granularity} may actually be $\pname{p-0.9(q-1.0[p-0.9]:A1)}$:\m{B}.
+
+\paragraph{Linker symbols} As we increase the amount of information in
+PackageId, it's important to be careful about the length of these IDs,
+as they are used for exported linker symbols (e.g.
+\verb|base_TextziReadziLex_zdwvalDig_info|). Very long symbol names
+hurt compile and link time, object file sizes, GHCi startup time,
+dynamic linking, and make gdb hard to use. As such, we are going to
+do away with full package names and versions and instead use just a
+base-62 encoded hash, with the first five characters of the package
+name for user-friendliness.
+
+\subsection{Package selection}
+
+When I fire up \texttt{ghci} with no arguments, GHC somehow creates
+out of thin air some consistent set of packages, whose modules I can
+load using \texttt{:m}. This functionality is extremely handy for
+exploratory work, but actually GHC has to work quite hard in order
+to generate this set of packages, the contents of which are all
+dumped into a global namespace. For example, GHC doesn't have access
+to Cabal's dependency solver, nor does it know \emph{which} packages
+the user is going to ask for, so it can't just run a constraint solver,
+get a set of consistent packages to offer and provide them to the user.\footnote{Some might
+argue that depending on a global environment in this fashion is wrong, because
when you perform a build in this way, you have absolutely no idea what
+dependencies you actually ended up using. But the fact remains that for
+end users, this functionality is very useful.}
+
+To make matters worse, while in the current design of the package database,
+a package is uniquely identified by its package name and version, in
+the Backpack design, it is \emph{mandatory} that we support multiple
+packages installed in the database with the same package name and version,
+and this can result in complications in the user model. This further
+complicates GHC's default package selection algorithm.
+
+In this section, we describe how the current algorithm operates (including
+what invariants it tries to uphold and where it goes wrong), and how
+to replace the algorithm to handle generalization to
+multiple instances in the package database. We'll also try to tease
+apart the relationship between package keys and installed package IDs in
+the database.
+
+\paragraph{The current algorithm} Abstractly, GHC's current package
+selection algorithm operates as follows. For every package name, select
+the package with the latest version (recall that this is unique) which
+is also \emph{valid}. A package is valid if:
+
+\begin{itemize}
+ \item It exists in the package database,
+ \item All of its dependencies are valid,
+ \item It is not shadowed by a package with the same package ID\footnote{Recall that currently, a package ID uniquely identifies a package in the package database} in
+ another package database (unless it is in the transitive closure
+ of a package named by \texttt{-package-id}), and
+ \item It is not ignored with \texttt{-ignore-package}.
+\end{itemize}
+
Package validity is probably the minimal criterion for GHC to ensure
+that it can actually \emph{use} a package. If the package is missing,
+GHC can't find the interface files or object code associated with the
+package. Ignoring packages is a way of pretending that a package is
+missing from the database.
+
+Package validity is also a very weak criterion. Another criterion we
+might hope holds is \emph{consistency}: when we consider the transitive
+closure of all selected packages, for any given package name, there
+should only be one instance providing that package. It is trivially
+easy to break this property: suppose that I have packages \pname{a-1.0},
+\pname{b-1.0} compiled against \pname{a-1.0}, and \pname{a-1.1}. GHC
+will happily load \pname{b-1.0} and \pname{a-1.1} together in the same
+interactive session (they are both valid and the latest versions), even
+though \pname{b-1.0}'s dependency is inconsistent with another package
+that was loaded. The user will notice if they attempt to treat entities
+from \pname{a} reexported by \pname{b-1.0} and entities from
+\pname{a-1.1} as type equal. Here is one user who had this problem:
+\url{http://stackoverflow.com/questions/12576817/}. In some cases, the
+problem is easy to work around (there is only one offending package
+which just needs to be hidden), but if the divergence is deep in two
+separate dependency hierarchies, it is often easier to just blow away
+the package database and try again.
+
+Perversely, destructive reinstallation helps prevent these sorts of
+inconsistent databases. While inconsistencies can arise when multiple
+versions of a package are installed, multiple versions will frequently
+lead to the necessity of reinstalls. In the previous example, if a user
+attempts to Cabal install a package which depends on \pname{a-1.1} and
+\pname{b-1.0}, Cabal's dependency solver will propose reinstalling
+\pname{b-1.0} compiled against \pname{a-1.1}, in order to get a
+consistent set of dependencies. If this reinstall is accepted, we
+invalidate all packages in the database which were previously installed
+against \pname{b-1.0} and \pname{a-1.0}, excluding them from GHC's
+selection process and making it more likely that the user will see a
+consistent view of the database.
+
+\paragraph{Enforcing consistent dependencies} From the user's
+perspective, it would be desirable if GHC never loaded a set of packages
+whose dependencies were inconsistent.
+There are two ways we can go
+about doing this. First, we can improve GHC's logic so that it doesn't
+pick an inconsistent set. However, as a point of design, we'd like to
+keep whatever resolution GHC does as simple as possible (in an ideal
world, we'd skip the validity checks entirely, but they ended up being
necessary to prevent a broken database from stopping GHC from starting up
+at all). In particular, GHC should \emph{not} learn how to do
+backtracking constraint solving: that's in the domain of Cabal. Second,
+we can modify the logic of Cabal to enforce that the package database is
+always kept in a consistent state, similar to the consistency check
+Cabal applies to sandboxes, where it refuses to install a package to a
+sandbox if the resulting dependencies would not be consistent.
+
The second alternative is appealing, but Cabal sandboxes are currently
+designed for small, self-contained single projects, as opposed to the
+global ``universe'' that a default environment is intended to provide.
+For example, with a Cabal sandbox environment, it's impossible to
+upgrade a dependency to a new version without blowing away the sandbox
+and starting again. To support upgrades, Cabal needs to do some work:
+when a new version is put in the default set, all of the
+reverse-dependencies of the old version are now inconsistent. Cabal
+should offer to hide these packages or reinstall them compiled against
+the latest version. Furthermore, because we in general may not have write
+access to all visible package databases, this visibility information
+must be independent of the package databases themselves.
+
+As a nice bonus, Cabal should also be able to snapshot the older
+environment which captures the state of the universe prior to the
+installation, in case the user wants to revert back.
+
+\paragraph{Modifying the default environment} Currently, after GHC
+calculates the default package environment, a user may further modify
+the environment by passing package flags to GHC, which can be used to
+explicitly hide or expose packages. How do these flags interact with
+our Cabal-managed environments? Hiding packages is simple enough,
+but exposing packages is a bit dicier. If a user asks for a different
+version of a package than in the default set, it will probably be
+inconsistent with the rest of the dependencies. Cabal would have to
+be consulted to figure out a maximal set of consistent packages with
+the constraints given. Alternatively, we could just supply the package
+with no claims of consistency.
+
+However, this use-case is rare. Usually, it's not because they want a
+specific version: the package is hidden simply because we're not
+interested in loading it by default (\pname{ghc-api} is the canonical
+example, since it dumps a lot of modules in the top level namespace).
+If we distinguish packages which are consistent but hidden, their
+loads can be handled appropriately.
+
+\paragraph{Consistency in Backpack} We have stated as an implicit
+assumption that if we have both \pname{foo-1.0} and \pname{foo-1.1}
+available, only one should be loaded at a time. What are the
+consequences if both of these packages are loaded at the same time? An
+import of \m{Data.Foo} provided by both packages would be ambiguous and
+the user might find some type equalities they expect to hold would not.
+However, the result is not \emph{unsound}: indeed, we might imagine a
+user purposely wanting two different versions of a library in the same
+program, renaming the modules they provided so that they could be
+referred to unambiguously. As another example, suppose that we have an
+indefinite package with a hole that is instantiated multiple times. In
+this case, a user absolutely may want to refer to both instantiations,
+once again renaming modules so that they have unique names.
+
+There are two consequences of this. First, while the default package
+set may enforce consistency, a user should still be able to explicitly
+ask for a package instance, renamed so that its modules don't conflict,
+and then use it in their program. Second, instantiated indefinite packages
+should \emph{never} be placed in the default set, since it's impossible
+to know which instantiation is the one the user prefers. A definite package
+can reexport an instantiated module under an unambiguous name if the user
+so pleases.
+
+\paragraph{Shadowing, installed package IDs, ABI hashes and package
+keys} Shadowing plays an important role for maintaining the soundness of
+compilation; call this the \emph{compatibility} of the package set. The
+problem it addresses is when there are two distinct implementations of a
+module, but because their package ID (or package key, in the new world
+order) are the same, they are considered type equal. It is absolutely
+wrong for a single program to include both implementations
+simultaneously (the symbols would conflict and GHC would incorrectly
+conclude things were type equal when they're not), so \emph{shadowing}'s
+job is to ensure that only one instance is picked, and all the other
+instances considered invalid (and their reverse-dependencies, etc.)
+Recall that in current GHC, within a package database, a package
+instance is uniquely identified by its package ID\@; thus, shadowing
+only needs to take place between package databases. An interesting
+corner case is when the same package ID occurs in both databases, but
+the installed package IDs are the \emph{same}. Because the installed
+package ID is currently simply an ABI hash, we skip shadowing, because
+the packages are---in principle---interchangeable.
+
+There are currently a number of proposed changes to this state of affairs:
+
+\begin{itemize}
+ \item Change installed package IDs to not be based on ABI hashes.
+ ABI hashes have a number of disadvantages as identifiers for
                packages in the database. First, they cannot be computed until
                after compilation, which gave the multi-instance GSoC project a
                few years ago some headaches. Second, it's not really true that
+ programs with identical ABI hashes are interchangeable: a new
+ package may be ABI compatible but have different semantics.
+ Thus, installed package IDs are a poor unique identifier for
+ packages in the package database. However, because GHC does not
+ give ABI stability guarantees, it would not be possible to
+ assume from here that packages with the same installed package
+ ID are ABI compatible.
+
+ \item Relaxing the uniqueness constraint on package IDs. There are
+ actually two things that could be done here. First, since we
+ have augmented package IDs with dependency resolution
+ information to form package keys, we could simply state that
+ package keys uniquely identify a package in a database.
+ Shadowing rules can be implemented in the same way as before, by
+ preferring the instance topmost on the stack. Second, we could
+ also allow \emph{same-database} shadowing: that is, not even
+ package keys are guaranteed to be unique in a database: instead,
+ installed package IDs are the sole unique identifier of a
+ package. This architecture is Nix inspired, as the intent is
+ to keep all package information in a centralized database.
+\end{itemize}
+
+Without mandatory package environments, same-database shadowing is a bad
+idea, because GHC now has no idea how to resolve shadowing. Conflicting
+installed package IDs can be simulated by placing them in multiple
package databases (in principle, the databases can be concatenated together
and treated as a single monolithic database.)
+
+\section{Shapeless Backpack}\label{sec:simplifying-backpack}
+
+Backpack as currently defined always requires a \emph{shaping} pass,
+which calculates the shapes of all modules defined in a package.
+The shaping pass is critical to the solution of the double-vision problem
+in recursive module linking, but it also presents a number of unpalatable
+implementation problems:
+
+\begin{itemize}
+
    \item \emph{Shaping is a lot of work.}  A module shape specifies the
        provenance of all data types and identifiers defined by a
        module. To calculate this, we must preprocess and parse all
+ modules, even before we do the type-checking pass. (Fortunately,
+ shaping doesn't require a full parse of a module, only enough
+ to get identifiers. However, it does have to understand import
+ statements at the same level of detail as GHC's renamer.)
+
+ \item \emph{Shaping must be done upfront.} In the current Backpack
+ design, all shapes must be computed before any typechecking can
+ occur. While performing the shaping pass upfront is necessary
+ in order to solve the double vision problem (where a module
+ identity may be influenced by later definitions), it means
+ that GHC must first do a shaping pass, and then revisit every module and
+ compile them proper. Nor is it (easily) possible to skip the
+ shaping pass when it is unnecessary, as one might expect to be
+ the case in the absence of mutual recursion. Shaping is not
+ a ``pay as you go'' language feature.
+
+ \item \emph{GHC can't compile all programs shaping accepts.} Shaping
+ accepts programs that GHC, with its current hs-boot mechanism, cannot
+ compile. In particular, GHC requires that any data type or function
+ in a signature actually be \emph{defined} in the module corresponding
+ to that file (i.e., an original name can be assigned to these entities
+ immediately.) Shaping permits unrestricted exports to implement
+ modules; this shows up in the formalism as $\beta$ module variables.
+
+ \item \emph{Shaping encourages inefficient program organization.}
+ Shaping is designed to enable mutually recursive modules, but as
+ currently implemented, mutual recursion is less efficient than
+ code without recursive dependencies. Programmers should avoid
+ this code organization, except when it is absolutely necessary.
+
+ \item \emph{GHC is architecturally ill-suited for directly
+ implementing shaping.} Shaping implies that GHC's internal
+ concept of an ``original name'' be extended to accommodate
+ module variables. This is an extremely invasive change to all
+ aspects of GHC, since the original names assumption is baked
+ quite deeply into the compiler. Plausible implementations of
+ shaping requires all these variables to be skolemized outside
+ of GHC\@.
+
+\end{itemize}
+
+To be clear, the shaping pass is fundamentally necessary for some
+Backpack packages. Here is the example which convinced Simon:
+
+\begin{verbatim}
+package p where
+ A :: [data T; f :: T -> T]
+ B = [export T(MkT), h; import A(f); data T = MkT; h x = f MkT]
+ A = [export T(MkT), f, h; import B; f MkT = MkT]
+\end{verbatim}
+
+The key to this example is that B \emph{may or may not typecheck} depending
+on the definition of A. Because A reexports B's definition T, B will
+typecheck; but if A defined T on its own, B would not typecheck. Thus,
+we \emph{cannot} typecheck B until we have done some analysis of A (the
+shaping analysis!)
+
+Thus, it is beneficial (from an optimization point of view) to
+consider a subset of Backpack for which shaping is not necessary.
+Here is a programming discipline which does just that, which we will call the \textbf{linking restriction}: \emph{Module implementations must be declared before
+signatures.} Formally, this restriction modifies the rule for merging
+polarized module shapes ($\widetilde{\tau}_1^{m_1} \oplus \widetilde{\tau}_2^{m_2}$) so that
+$\widetilde{\tau}_1^- \oplus \widetilde{\tau}_2^+$ is always undefined.\footnote{This seemed to be the crispest way of defining the restriction, although this means an error happens a bit later than I'd like it to: I'd prefer if we errored while merging logical contexts, but we don't know what is a hole at that point.}
+
+Here is an example of the linking restriction. Consider these two packages:
+
+\begin{verbatim}
+package random where
+ System.Random = [ ... ].hs
+
+package monte-carlo where
+ System.Random :: ...
+ System.MonteCarlo = [ ... ].hs
+\end{verbatim}
+
+Here, random is a definite package which may have been compiled ahead
+of time; monte-carlo is an indefinite package with a dependency
+on any package which provides \verb|System.Random|.
+
+Now, to link these two applications together, only one ordering
+is permissible:
+
+\begin{verbatim}
+package myapp where
+ include random
+ include monte-carlo
+\end{verbatim}
+
+If myapp wants to provide its own random implementation, it can do so:
+
+\begin{verbatim}
+package myapp2 where
+ System.Random = [ ... ].hs
+ include monte-carlo
+\end{verbatim}
+
+In both cases, all of \verb|monte-carlo|'s holes have been filled in by the time
+it is included. The alternate ordering is not allowed.
+
+Why does this discipline prevent mutually recursive modules? Intuitively,
+a hole is the mechanism by which we can refer to an implementation
before it is defined; otherwise, we can only refer to definitions which
precede our definition. If there are never any holes \emph{which get filled},
+implementation links can only go backwards, ruling out circularity.
+
+It's easy to see how mutual recursion can occur if we break this discipline:
+
+\begin{verbatim}
+package myapp2 where
+ include monte-carlo
+ System.Random = [ import System.MonteCarlo ].hs
+\end{verbatim}
+
+\subsection{Typechecking of definite modules without shaping}
+
+If we are not carrying out a shaping pass, we need to be able to calculate
+$\widetilde{\Xi}_{\mathsf{pkg}}$ on the fly. In the case that we are
+compiling a package---there will be no holes in the final package---we
+can show that shaping is unnecessary quite easily, since with the
+linking restriction, everything is definite from the get-go.
+
+Observe the following invariant: at any given step of the module
+bindings, the physical context $\widetilde{\Phi}$ contains no
+holes. We can thus conclude that there are no module variables in any
+type shapes. As the only time a previously calculated package shape can
+change is due to unification, the incrementally computed shape is in
+fact the true one.
+
+As far as the implementation is concerned, we never have to worry
+about handling module variables; we only need to do extra typechecks
+against (renamed) interface files.
+
+\subsection{Compiling definite packages}\label{sec:compiling}
+
+% New definitions
+\algnewcommand\algorithmicswitch{\textbf{switch}}
+\algnewcommand\algorithmiccase{\textbf{case}}
+\algnewcommand\algorithmicassert{\texttt{assert}}
+% New "environments"
+\algdef{SE}[SWITCH]{Switch}{EndSwitch}[1]{\algorithmicswitch\ #1\ \algorithmicdo}{\algorithmicend\ \algorithmicswitch}%
+\algdef{SE}[CASE]{Case}{EndCase}[1]{\algorithmiccase\ ``#1''}{\algorithmicend\ \algorithmiccase}%
+\algtext*{EndSwitch}%
+\algtext*{EndCase}%
+
+\begin{algorithm}
+ \caption{Compilation of definite packages (assume \texttt{-hide-all-packages} on all \texttt{ghc} invocations)}\label{alg:compile}
+\begin{algorithmic}
+ \Procedure{Compile}{\textbf{package} $P$ \textbf{where} $\vec{B}$, $H$, $db$}\Comment{}$H$ maps hole module names to identities
+ \State$flags\gets \nil$
+ \State$\mathcal{K}$ $\gets$ \Call{Hash}{$P + H$}
+ \State%
+ In-place register the package $\mathcal{K}$ in $db$
+ \For{$B$ \textbf{in} $\vec{B}$}
+ \Case{$p = p\texttt{.hs}$}
+ \State\Call{Exec}{\texttt{ghc -c} $p$\texttt{.hs} \texttt{-package-db} $db$ \texttt{-package-name} $\mathcal{K}$ $flags$}
+ \EndCase%
+ \Case{$p$ $\cc$ $p$\texttt{.hsig}}
+ \State\Call{Exec}{\texttt{ghc -c} $p$\texttt{.hsig} \texttt{-package-db} $db$ \texttt{--sig-of} $H(p)$ $flags$}
+ \EndCase%
+ \Case{$p = p'$}
+ \State$flags\gets flags$ \texttt{-alias} $p$ $p'$
+ \EndCase%
+ \Case{\Cinc{$P'$} $\langle\vec{p_H\mapsto p_H'}, \vec{p\mapsto p'} \rangle$}
+ \State\textbf{let} $H'(p_H) = $ \Call{Exec}{\texttt{ghc --resolve-module} $p_H'$ \texttt{-package-db} $db$ $flags$}
+ \State$\mathcal{K}'\gets$ \Call{Compile}{$P'$, $H'$, $db$}\Comment{}Nota bene: not $flags$
+ \State$flags\gets flags$ \texttt{-package} $\mathcal{K}'$ $\langle\vec{p\mapsto p'}\rangle$
+ \EndCase%
+ \EndFor%
+ \State%
+ Remove $\mathcal{K}$ from $db$
+ \State%
+ Install the complete package $\mathcal{K}$ to the global database
+ \State\Return$\mathcal{K}$
+ \EndProcedure%
+\end{algorithmic}
+\end{algorithm}
+
The full recursive procedure for compiling a Backpack package using
one-shot compilation is given in Algorithm~\ref{alg:compile}.  We
+recursively walk through Backpack descriptions, processing each line by
+invoking GHC and/or modifying our package state. Here is a more
+in-depth description of the algorithm, line-by-line:
+
+\paragraph{The parameters} To compile a package description for package
+$P$, we need to know $H$, the mapping of holes $p_H$ in package $P$ to
+physical module identities $\nu$ which are implementing them; this
+mapping is used to calculate the package key $\mathcal{K}$ for the
+package in question. Furthermore, we have an inplace package database
+$db$ in which we will register intermediate build results, including
+partially compiled parent packages which may provide implementations
+of holes for packages they include.
+
+\subsection{Compiling implementations}
+
+We compile modules in the same way we do today, but with some extra
+package visibility $flags$, which let GHC know how to resolve imports
+and look up original names. We'll describe what the new flags are
+and also discuss some subtleties with module lookup.
+
+\paragraph{In-place registration} Perhaps surprisingly, we start
+compilation by registering the (uncompiled) package in the in-place
+package database. This registration does not expose packages, and is
+purely intended to inform the compilation of subpackages where to
+find modules that are provided by the parent (in-progress) package,
+as well as provide auxiliary information, e.g., such as the package name
+and version for error reporting. The pre-registration trick is an old
+one used by the GHC build system; the key invariant to look out for
+is that we shouldn't reference original names in modules that haven't
+been built yet. This is enforced by our manual tracking of holes in
+$H$: a module can't occur in $H$ unless it's already been compiled!
+
+\paragraph{New package resolution algorithm} Currently, invocations
+of \texttt{-package} and similar flags have the result of \emph{hiding}
+other exposed packages with the same name. However, this is not going
+to work for Backpack: an indefinite package may get loaded multiple times
+with different instantiations, and it might even make sense to load multiple
+versions of the same package simultaneously, as long as their modules
+are renamed to not conflict.
+
+Thus, we impose the following behavior change: when
+\texttt{-hide-all-packages} is specified, we do \emph{not} automatically
+hide packages with the same name as a package specified by
+\texttt{-package} (or a similar flag): they are all included, even if
+there are conflicts. To deal with conflicts, we augment the syntax of
+\texttt{-package} to also accept a list of thinnings and renamings, e.g.
+\texttt{-package} \pname{containers} $\langle\m{Data.Set},
+\m{Data.Map}\mapsto \m{Map}\rangle$ says to make visible for import
+\m{Data.Set} and \m{Map} (which is \m{Data.Map} renamed.) This means
+that
+\texttt{-package} \pname{containers-0.9} $\langle\m{Data.Set}\mapsto
+\m{Set09}\rangle$ \texttt{-package} \pname{containers-0.8}
+$\langle\m{Data.Set}\mapsto \m{Set08}\rangle$ now uses both packages
+concurrently (previously, GHC would hide one of them.)
+
+Additionally, it's important to note that two packages exporting the
+same module do not \emph{necessarily} cause a conflict; the modules
+may be linkable. For example, \texttt{-package} \pname{containers} $\langle\m{Data.Set}\rangle$
+\texttt{-package} \pname{containers} $\langle\m{Data.Set}\rangle$ is fine, because
+precisely the same implementation of \m{Data.Set} is loaded in both cases.
+A similar situation can occur with signatures:
+
+\begin{verbatim}
+package p where
+ A :: [ x :: Int ]
package q where
+ include p
+ A :: [ y :: Int ]
+ B = [ import A; z = x + y ] -- *
+package r where
+ A = [ x = 0; y = 0 ]
+ include q
+\end{verbatim}
+
+Here, both \pname{p} and \pname{q} are visible when compiling the starred
+module, which compiles with the flags \texttt{-package} \pname{p}, but there
+are two interface files available: one available locally, and one from \pname{p}.
+Both of these interface files are \emph{forwarding} to the original implementation
+\pname{r} (more on this in the ``Compiling signatures'' file), so rather than
+reporting an ambiguous import, we instead have to merge the two interface files
+together and use the result as the interface for the module. (This could be done
+on the fly, or we could generate merged interface files as we go along.)
+
+Note that we do not need to merge signatures with an implementation, in such
+cases, we should just use the implementation interface. E.g.
+
+\begin{verbatim}
+package p where
+ A :: ...
+package q where
+ A = ...
+ include p
+ B = [ import A ] -- *
+\end{verbatim}
+
+Here, \m{A} is available both from \pname{p} and \pname{q}, but the use in the
+starred module should be done with respect to the full implementation.
+
+\paragraph{The \texttt{-alias} flag} We introduce a new flag
+\texttt{-alias} for aliasing modules. Aliasing is analogous to
+the merging that can occur when we include packages, but it also applies
+to modules which are locally defined. When we alias a module $p$ with
+$p'$, we require that $p'$ exists in the current module mapping, and then
+we attempt to add an entry for it at entry $p$. If there is no mapping for
+$p$, this succeeds; otherwise, we apply the same conflict resolution algorithm.
+
+\subsection{Compiling signatures}
+
Signature compilation is triggered when we compile a signature file.
This mode is similar to how we process \verb|hs-boot| files, except
+we pass an extra flag \verb|--sig-of| which specifies what the
+identity of the actual implementation of the signature is (according to our $H$
+mapping). This is guaranteed to exist, due to the linking
+restriction, although it may be in a partially registered package
in $db$.  If the module is \emph{not} exposed under the name of the
\texttt{hisig} file, we output an \texttt{hisig} file which, for all declarations the
+signature exposes, forwards their definitions to the original
+implementation file. The intent is that any code in the current package
which compiles against this signature will use this \texttt{hisig} file,
not the original \texttt{hi} file.
+For example, the \texttt{hisig} file produced when compiling the starred interface
+points to the implementation in package \pname{q}.
+
+\begin{verbatim}
+package p where
+ A :: ... -- *
+ B = [ import A; ... ]
+package q where
+ A = [ ... ]
+ include p
+\end{verbatim}
+
+\paragraph{Sometimes \texttt{hisig} is unnecessary}
+In the following package:
+
+\begin{verbatim}
+package p where
+ P = ...
+ P :: ...
+\end{verbatim}
+
+Paper Backpack specifies that we check the signature \m{P} against implementation
+\m{P}, but otherwise no changes are made (i.e., the signature does not narrow
+the implementation.) In this case, it is not necessary to generate an \texttt{hisig} file;
+the original interface file suffices.
+
+\paragraph{Multiple signatures} As a simplification, we assume that there
+is only one signature per logical name in a package. (This prevents
+us from expressing mutual recursion in signatures, but let's not worry
+about it for now.)
+
+\paragraph{Restricted recursive modules ala hs-boot}\label{sec:hs-boot-restrict}
+When we compile an \texttt{hsig} file without any \texttt{--sig-of} flag (because
+no implementation is known), we fall back to old-style GHC mutual recursion.
+Na\"\i vely, a shaping pass would be necessary;
+so we adopt an existing constraint that
+already applies to hs-boot files: \emph{at the time we define a signature,
+we must know what the original name for all data types is}. In practice,
+GHC enforces this by stating that: (1) an hs-boot file must be
+accompanied with an implementation, and (2) the implementation must
+in fact define (and not reexport) all of the declarations in the signature.
+We can discover if a signature is intended to break a recursive module loop
+when we discover that $p\notin flags_H$; in this case, we fallback to the
+old hs-boot behavior. (Alternatively, the user can explicitly ask for it.)
+
+Why does this not require a shaping pass? The reason is that the
+signature is not really polymorphic: we require that the $\alpha$ module
+variable be resolved to a concrete module later in the same package, and
+that all the $\beta$ module variables be unified with $\alpha$. Thus, we
+know ahead of time the original names and don't need to deal with any
+renaming.\footnote{This strategy doesn't completely resolve the problem
+of cross-package mutual recursion, because we need to first compile a
+bit of the first package (signatures), then the second package, and then
+the rest of the first package.}
+
+Compiling packages in this way gives the tantalizing possibility
+of true separate compilation: the only thing we don't know is what the actual
+package name of an indefinite package will be, and what the correct references
+to have are. This is a very minor change to the assembly, so one could conceive
+of dynamically rewriting these references at the linking stage. But
+separate compilation achieved in this fashion would not be able to take
+advantage of cross-module optimizations.
+
+\subsection{Compiling includes}
+
Includes are the most interesting part of the compilation process, as we have
to calculate how the holes of the subpackage we are filling in are compiled $H'$
+and modify our flags to make the exports of the include visible to subsequently
+compiled modules. We consider the case with renaming, since includes with
+no renaming are straightforward.
+
+First, we assume that we know \emph{a priori} what the holes of a
+package $p_H$ are (either by some sort of pre-pass, or explicit
+declaration.) For each of their \emph{renamed targets} $p'_H$, we look
+up the module in the current $flags$ environment, retrieving the
+physical module identity by consulting GHC with the
+\texttt{--resolve-module} flag and storing it in $H'$. (This can be done in batch.)
+For example:
+
+\begin{verbatim}
+package p where
+ A :: ...
+ ...
+package q where
+ A = [ ... ]
+ B = [ ... ]
+ include p (A as B)
+\end{verbatim}
+
+When computing the entry $H(\pname{A})$, we run the command \texttt{ghc --resolve-module} \pname{B}.
+
+Next, we recursively call \textsc{Compile} with the computed $H'$.
+Note that the entries in $H$ may refer to modules which would not be
+picked up by $flags$, but they will be registered in the inplace
+package database $db$.
+For example, in this situation:
+
+\begin{verbatim}
+package p where
+ B :: ...
+ C = [ import B; ... ]
+package q where
+ A = [ ... ]
+ B = [ import A; ... ]
+ include p
+ D = [ import C; ... ]
+\end{verbatim}
+
+When we recursively process package \pname{p}, $H$ will refer to
+\pname{q}:\m{B}, and we need to know where to find it (\pname{q} is only
+partially processed and so is in the inplace package database.)
+Furthermore, the interface file for \m{B} may refer to \pname{q}:\m{A},
+and thus we likewise need to know how to find its interface file.
+
+Note that the inplace package database is not expected to expose any
+packages. Otherwise, this example would improperly compile:
+
+\begin{verbatim}
+package p where
+ B = [ import A; ... ]
+package q where
+ A = ...
+ include p
+\end{verbatim}
+
+\pname{p} does not compile on its own, so it should not compile if it is
+recursively invoked from \pname{q}. However, if we exposed the modules
+of the partially registered package \pname{q}, \m{A} is now suddenly
+resolvable.
+
+Finally, once the subpackage is compiled, we can add it to our $flags$ so later
+modules we compile see its (appropriately thinned and renamed) modules, much
+like aliasing.
+
+\paragraph{Absence of an \texttt{hi} file}
+It is important that \texttt{--resolve-module} truly looks up the \emph{implementor}
+of a module, and not just a signature which is providing it at some name.
+Sometimes, a little extra work is necessary to compute this, for example:
+
+\begin{verbatim}
+package p where
+ A :: [ y :: Int ]
+package q where
+ A :: [ x :: Int ]
+ include p -- *
+package r where
+ A = [ x = 0; y = 1 ]
+ include q
+\end{verbatim}
+
+When computing $H'$ for the starred include, our $flags$ only include
+\texttt{-package-dir} \pname{r} $cwd_r$ $\langle\rangle$: with a thinning
+that excludes all modules! The only interface file we can pick up with these
+$flags$ is the local definition of \m{A}. However, we \emph{absolutely}
+should not set $H'(\m{A})=\pname{q}:\m{A}$; if we do so, then we will incorrectly
+conclude when compiling the signature in \pname{p} that the implementation
+doesn't export enough identifiers to fulfill the signature (\texttt{y} is not
+available from just the signature in \pname{q}). Instead, we have to look
+up the original implementor of \m{A} in \pname{r}, and use that in $H'$.
+
+\subsection{Commentary}
+
+\paragraph{Just because it compiled, doesn't mean the individual packages type check}
+The compilation mechanism described is slightly more permissive than vanilla Backpack.
+Here is a simple example:
+
+\begin{verbatim}
+package p where
+ A :: [ data T = T ]
+ B :: [ data T = T ]
+ C = [
+ import A
+ import B
+ x = A.T :: B.T
+ ]
+package q where
+ A = [ data T = T ]
+ B = A
+ include p
+\end{verbatim}
+
+Here, we incorrectly decide \m{A}\verb|.T| and \m{B}\verb|.T| are type
+equal when typechecking \m{C}, because the \verb|hisig| files we
+generate for them all point to the same original implementation. However,
+\pname{p} should not typecheck.
+
+The problem here is that type checking asks ``does it compile with respect
+to all possible instantiations of the holes'', whereas compilation asks
+``does it compile with respect to this particular instantiation of holes.''
+In the absence of a shaping pass, this problem is unavoidable.
+
+\section{Shaped Backpack}
+
+Despite the simplicity of shapeless Backpack with the linking
+restriction in the absence of holes, we will find that when we have
+holes, it will be very difficult to do type-checking without
+some form of shaping. This section is very much a work in progress,
+but the ability to typecheck against holes, even with the linking restriction,
+is a very important part of modular separate development, so we will need
+to support it at some point.
+
+\subsection{Efficient shaping}
+
+(These are Edward's opinion, he hasn't convinced other folks that this is
+the right way to do it.)
+
+In this section, I want to argue that, although shaping constitutes
+a pre-pass which must be run before compilation in earnest, it is only
+about as bad as the dependency resolution analysis that GHC already does
+in \verb|ghc -M| or \verb|ghc --make|.
+
+In Paper Backpack, what information does shaping compute? It looks at
+exports, imports, data declarations and value declarations (but not the
+actual expressions associated with these values.) As a matter of fact,
+GHC already must look at the imports associated with a package in order
+to determine the dependency graph, so that it can have some order to compile
+modules in. There is a specialized parser which just parses these statements,
+and then ignores the rest of the file.
+
+A bit of background: the \emph{renamer} is responsible for resolving
+imports and figuring out where all of these entities actually come from.
+SPJ would really like to avoid having to run the renamer in order to perform
+a shaping pass.
+
+\paragraph{Is it necessary to run the Renamer to do shaping?}
+Edward and Scott believe the answer is no, well, partially.
+Shaping needs to know the original names of all entities exposed by a
+module/signature. Then it needs to know (a) which entities a module/signature
+defines/declares locally and (b) which entities that module/signature exports.
+The former, (a), can be determined by a straightforward inspection of a parse
+tree of the source file.\footnote{Note that no expression or type parsing
+is necessary. We only need names of local values, data types, and data
+constructors.} The latter, (b), is a bit trickier. Right now it's the Renamer
+that interprets imports and exports into original names, so we would still
+rely on that implementation. However, the Renamer does other, harder things
+that we don't need, so ideally we could factor out the import/export
+resolution from the Renamer for use in shaping.
+
+Unfortunately the Renamer's import resolution analyzes \verb|.hi| files, but for
+local modules, which haven't yet been typechecked, we don't have those.
+Instead, we could use a new file format, \verb|.hsi| files, to store the shape of
+a locally defined module. (Defined packages are bundled with their shapes,
+so included modules have \verb|.hsi| files as well.) (What about the logical
+vs.~physical distinction in file names?) If we refactor the import/export
+resolution code, could we rewrite it to generically operate on both
+\verb|.hi| files and \verb|.hsi| files?
+
+Alternatively, rather than storing shapes on a per-source basis, we could
+store (in memory) the entire package shape. Similarly, included packages
+could have a single shape file for the entire package. Although this approach
+would make shaping non-incremental, since an entire package's shape would
+be recomputed any time a constituent module's shape changes, we do not expect
+shaping to be all that expensive.
+
+\subsection{Typechecking of indefinite modules}\label{sec:typechecking-indefinite}
+
+Recall in our argument in the definite case, where we showed there are
+no holes in the physical context. With indefinite modules, this is no
+longer true. While (with the linking restriction) these holes will never
+be linked against a physical implementation, they may be linked against
+other signatures. (Note: while disallowing signature linking would
+solve our problem, it would disallow a wide array of useful instances of
+signature reuse, for example, a package mylib that implements both
+mylib-1x-sig and mylib-2x-sig.)
+
+With holes, we must handle module variables, and we sometimes must unify them:
+
+\begin{verbatim}
+package p where
+ A :: [ data A ]
+package q where
+ A :: [ data A ]
+package r where
+ include p
+ include q
+\end{verbatim}
+
+In this package, it is not possible to a priori assign original names to
+module A in p and q, because in package r, they should have the same
+original name. When signature linking occurs, unification may occur,
+which means we have to rename all relevant original names. (A similar
+situation occurs when a module is typechecked against a signature.)
+
+An invariant which would be nice to have is this: when typechecking a
+signature or including a package, we may apply renaming to the entities
+being brought into context. But once we've picked an original name for
+our entities, no further renaming should be necessary. (Formally, in the
+unification for semantic object shapes, apply the unifier to the second
+shape, but not the first one.)
+
+However, there are plenty of counterexamples here:
+
+\begin{verbatim}
+package p where
+ A :: [ data A ]
+ B :: [ data A ]
+ M = ...
+ A = B
+\end{verbatim}
+
+In this package, does module M know that A.A and B.A are type equal? In
+fact, the shaping pass will have assigned equal module identities to A
+and B, so M \emph{equates these types}, despite the aliasing occurring
+after the fact.
+
+We can make this example more sophisticated, by having a later
+subpackage which causes the aliasing; now, the decision is not even a
+local one (on the other hand, the equality should be evident by inspection
+of the package interface associated with q):
+
+\begin{verbatim}
+package p where
+ A :: [ data A ]
+ B :: [ data A ]
+package q where
+ A :: [ data A ]
+ B = A
+package r where
+ include p
+ include q
+\end{verbatim}
+
+Another possibility is that it might be acceptable to do a mini-shaping
+pass, without parsing modules or signatures, \emph{simply} looking at
+names and aliases. But logical names are not the only mechanism by
+which unification may occur:
+
+\begin{verbatim}
+package p where
+ C :: [ data A ]
+ A = [ data A = A ]
+ B :: [ import A(A) ]
+ C = B
+\end{verbatim}
+
+It is easy to conclude that the original names of C and B are the same. But
+more importantly, C.A must be given the original name of p:A.A. This can only
+be discovered by looking at the signature definition for B. In any case, it
+is worth noting that this situation parallels the situation with hs-boot
+files (although there is no mutual recursion here).
+
+The conclusion is that you will probably, in fact, have to do real
+shaping in order to typecheck all of these examples.
+
+\paragraph{Hey, these signature imports are kind of tricky\ldots}
+
+When signatures and modules are interleaved, the interaction can be
+complex. Here is an example:
+
+\begin{verbatim}
+package p where
+ C :: [ data A ]
+ M = [ import C; ... ]
+ A = [ import M; data A = A ]
+ C :: [ import A(A) ]
+\end{verbatim}
+
+Here, the second signature for C refers to a module implementation A
+(this is permissible: it simply means that the original name for p:C.A
+is p:A.A). But wait! A relies on M, and M relies on C. Circularity?
+Fortunately not: a client of package p will find it impossible to have
+the hole C implemented in advance, since they will need to get their hands on module
+A\ldots but it will not be defined prior to package p.
+
+In any case, however, it would be good to emit a warning if a package
+cannot be compiled without mutual recursion.
+
+\subsection{Rename on entry}
+
+Consider the following example:
+
+\begin{verbatim}
+package p where
+ A :: [ data T = T ]
+ B = [ import A; x = T ]
+package q where
+ C :: ...
+ A = [ data T = T ]
+ include p
+ D = [
+ import qualified A
+ import qualified B
+ import C
+ x = B.T :: A.T
+ ]
+\end{verbatim}
+
+We are interested in type-checking \pname{q}, which is an indefinite package
+on account of the uninstantiated hole \m{C}. Furthermore, let's suppose that
+\pname{p} has already been independently typechecked, and its interface files
+installed in some global location with $\alpha_A$ used as the module identity
+of \m{A}. (To simplify this example, we'll assume $\beta_{AT}=\alpha_A$.)
+
+The first three lines of \pname{q} type check in the normal way, but \m{D}
+now poses a problem: if we load the interface file for \m{B} the normal way,
+we will get a reference to type \texttt{T} with the original name $\alpha_A$.\texttt{T},
+whereas from \m{A} we have an original name \pname{q}:\m{A}.\texttt{T}.
+
+Let's suppose that we already have the result of a shaping pass, which
+maps our identity variables to their true identities.
+Let's consider the possible options here:
+
+\begin{itemize}
+ \item We could re-typecheck \pname{p}, feeding it the correct instantiations
+ for its variables. However, this seems wasteful: we typechecked the
+ package already, and up-to-renaming, the interface files are exactly
+ what we need to type check our application.
+ \item We could make copies of all the interface files, renamed to have the
+ right original names. This also seems wasteful: why should we have to
+ create a new copy of every interface file in a library we depend on?
+ \item When \emph{reading in} the interface file to GHC, we could apply the
+ renaming according to the shaping pass and store that in memory.
+\end{itemize}
+
+That last solution is pretty appealing, however, there are still circumstances
+we need to create new interface files; these exactly mirror the cases described
+in Section~\ref{sec:compiling}.
+
+\subsection{Incremental typechecking}
+We want to typecheck modules incrementally, i.e., when something changes in
+a package, we only want to re-typecheck the modules that care about that
+change. GHC already does this today.%
+\footnote{\url{https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/RecompilationAvoidance}}
+Is the same mechanism sufficient for Backpack? Edward and Scott think that it
+is, mostly. Our conjecture is that a module should be re-typechecked if the
+existing mechanism says it should \emph{or} if the logical shape
+context (which maps logical names to physical names) has changed. The latter
+condition is due to aliases that affect typechecking of modules.
+
+Let's look again at an example from before:
+\begin{verbatim}
+package p where
+ A :: [ data A ]
+ B :: [ data A ]
+ M = [ import A; import B; ... ]
+\end{verbatim}
+Let's say that \verb|M| is typechecked successfully. Now we add an alias binding
+at the end of the package, \verb|A = B|. Does \verb|M| need to be
+re-typechecked? Yes! (Well, it seems so, but let's just assert ``yes'' for now.
+Certainly in the reverse case---if we remove the alias and then ask---this
+is true, since \verb|M| might have depended on the two \verb|A| types
+being the same.)
+The logical shape context changed to say that \verb|A| and
+\verb|B| now map to the same physical module identity. But does the existing
+recompilation avoidance mechanism say that \verb|M| should be re-typechecked?
+It's unclear. The \verb|.hi| file for \verb|M| records that it imported \verb|A| and
+\verb|B| with particular ABIs, but does it also know about the physical module
+identities (or rather, original module names) of these modules?
+
+Scott thinks this highlights the need for us to get our story straight about
+the connection between logical names, physical module identities, and file
+names!
+
+
+\subsection{Installing indefinite packages}\label{sec:installing-indefinite}
+
+If an indefinite package contains no code at all, we only need
+to install the interface file for the signatures. However, if
+they include code, we must provide all of the
+ingredients necessary to compile them when the holes are linked against
+actual implementations. (Figure~\ref{fig:pkgdb})
+
+\paragraph{Source tarball or preprocessed source?} What is the representation of the source that is saved? There
+are a number of possible choices:
+
+\begin{itemize}
+ \item The original tarballs downloaded from Hackage,
+ \item Preprocessed source files,
+ \item Some sort of internal, type-checked representation of Haskell code (maybe the output of the desugarer).
+\end{itemize}
+
+Storing the tarballs is the simplest and most straightforward mechanism,
+but we will have to be very certain that we can recompile the module
+later in precisely the same way we compiled it originally, to ensure the hi
+files match up (fortunately, it should be simple to perform an optional
+sanity check before proceeding.) The appeal of saving preprocessed
+source, or even the IRs, is that conceptually this is exactly
+what an indefinite package is: we have paused the compilation process
+partway, intending to finish it later. However, our compilation strategy
+for definite packages requires us to run this step using a \emph{different}
+choice of original names, so it's unclear how much work could actually be reused.
+
+\section{Surface syntax}
+
+In the Backpack paper, a brand new module language is presented, with
+syntax for inline modules and signatures. This syntax is probably worth implementing,
+because it makes it easy to specify compatibility packages, whose module
+definitions in general may be very short:
+
+\begin{verbatim}
+package ishake-0.12-shake-0.13 where
+ include shake-0.13
+ Development.Shake.Sys = Development.Shake.Cmd
+ Development.Shake = [ (**>) = (&>) ; (*>>) = (|*>)]
+ Development.Shake.Rule = [ defaultPriority = rule . priority 0.5 ]
+ include ishake-0.12
+\end{verbatim}
+
+However, there are a few things that are less than ideal about the
+surface syntax proposed by Paper Backpack:
+
+\begin{itemize}
+ \item It's completely different from the current method users
+ specify packages. There's nothing wrong with this per se
+ (one simply needs to support both formats) but the smaller
+ the delta, the easier the new packaging format is to explain
+ and implement.
+
+ \item Sometimes order matters (relative ordering of signatures and
+ module implementations), and other times it does not (aliases).
+ This can be confusing for users.
+
+ \item Users have to order module definitions topologically,
+ whereas in current Cabal modules can be listed in any order, and
+ GHC figures out an appropriate order to compile them.
+\end{itemize}
+
+Here is an alternative proposal, closely based on Cabal syntax. Given
+the following Backpack definition:
+
+\begin{verbatim}
+package libfoo(A, B, C, Foo) where
+ include base
+ -- renaming and thinning
+ include libfoo (Foo, Bar as Baz)
+ -- holes
+ A :: [ a :: Bool ].hsig
+ A2 :: [ b :: Bool ].hsig
+ -- normal module
+ B = [
+ import {-# SOURCE #-} A
+ import Foo
+ import Baz
+ ...
+ ].hs
+ -- recursively linked pair of modules, one is private
+ C :: [ data C ].hsig
+ D = [ import {-# SOURCE #-} C; data D = D C ].hs
+ C = [ import D; data C = C D ].hs
+ -- alias
+ A = A2
+\end{verbatim}
+
+We can write the following Cabal-like syntax instead (where
+all of the signatures and modules are placed in appropriately
+named files):
+
+\begin{verbatim}
+package: libfoo
+...
+build-depends: base, libfoo (Foo, Bar as Baz)
+holes: A A2 -- deferred for now
+exposed-modules: Foo B C
+aliases: A = A2
+other-modules: D
+\end{verbatim}
+
+Notably, all of these lists are \emph{insensitive} to ordering!
+The key idea is use of the \verb|{-# SOURCE #-}| pragma, which
+is enough to solve the important ordering constraint between
+signatures and modules.
+
+Here is how the elaboration works. For simplicity, in this algorithm
+description, we assume all packages being compiled have no holes
+(including \verb|build-depends| packages). Later, we'll discuss how to
+extend the algorithm to handle holes in both subpackages and the main
+package itself.
+
+\begin{enumerate}
+
+ \item At the top-level with \verb|package| $p$ and
+ \verb|exposed-modules| $ms$, record \verb|package p (ms) where|
+
+ \item For each package $p$ with thinning/renaming $ms$ in
+ \verb|build-depends|, record a \verb|include p (ms)| in the
+ Backpack package. The ordering of these includes does not
+ matter, since none of these packages have holes.
+
+ \item Take all modules $m$ in \verb|other-modules| and
+ \verb|exposed-modules| which were not exported by build
+ dependencies, and create a directed graph where hs and hs-boot
+ files are nodes and imports are edges (the target of an edge is
+ an hs file if it is a normal import, and an hs-boot file if it
+ is a SOURCE import). Topologically sort this graph, erroring if
+ this graph contains cycles (even with recursive modules, the
+ cycle should have been broken by an hs-boot file). For each
+ node, in this order, record \verb|M = [ ... ]| or \verb|M :: [ ... ]|
+ depending on whether or not it is an hs or hs-boot. If possible,
+ sort signatures before implementations when there is no constraint
+ otherwise.
+
+\end{enumerate}
+
+Here is a simple example which shows how SOURCE can be used to disambiguate
+between two important cases. Suppose we have these modules:
+
+\begin{verbatim}
+-- A1.hs
+import {-# SOURCE #-} B
+
+-- A2.hs
+import B
+
+-- B.hs
+x = True
+
+-- B.hs-boot
+x :: Bool
+\end{verbatim}
+
+Then we translate the following packages as follows:
+
+\begin{verbatim}
+exposed-modules: A1 B
+-- translates to
+B :: [ x :: Bool ]
+A1 = [ import B ]
+B = [ x = True ]
+\end{verbatim}
+
+but
+
+\begin{verbatim}
+exposed-modules: A2 B
+-- translates to
+B = [ x = True ]
+B :: [ x :: Bool ]
+A2 = [ import B ]
+\end{verbatim}
+
+The import controls placement between signature and module, and in A1 it
+forces B's signature to be sorted before B's implementation (whereas in
+the second section, there is no constraint, so we preferentially place
+B's implementation first.)
+
+\paragraph{Holes in the database} In the presence of holes,
+\verb|build-depends| resolution becomes more complicated. First,
+let's consider the case where the package we are building is
+definite, but the package database contains indefinite packages with holes.
+In order to maintain the linking restriction, we now have to order packages
+from step (2) of the previous elaboration. We can do this by creating
+a directed graph, where nodes are packages and edges are from holes to the
+package which implements them. If there is a cycle, this indicates a mutually
+recursive package. In the absence of cycles, a topological sorting of this
+graph preserves the linking invariant.
+
+One subtlety to consider is the fact that an entry in \verb|build-depends|
+can affect how a hole is instantiated by another entry. This might be a
+bit weird to users, who might like to explicitly say how holes are
+filled when instantiating a package. Food for thought, surface syntax wise.
+
+\paragraph{Holes in the package} Actually, this is quite simple: the
+ordering of includes goes as before, but some indefinite packages in the
+database are less constrained as their ``dependencies'' are fulfilled
+by the holes at the top-level of this package. It's also worth noting
+that some dependencies will go unresolved, since the following package
+is valid:
+
+\begin{verbatim}
+package a where
+ A :: ...
+package b where
+ include a
+\end{verbatim}
+
+\paragraph{Multiple signatures} In Backpack syntax, it's possible to
+define a signature multiple times, which is necessary for mutually
+recursive signatures:
+
+\begin{verbatim}
+package a where
+ A :: [ data A ]
+ B :: [ import A; data B = B A ]
+ A :: [ import B; data A = A B ]
+\end{verbatim}
+
+Critically, notice that we can see the constructors for both module B and A
+after the signatures are linked together. This is not possible in GHC
+today, but could be possible by permitting multiple hs-boot files. Now
+the SOURCE pragma indicating an import must \emph{disambiguate} which
+hs-boot file it intends to include. This might be one way of doing it:
+
+\begin{verbatim}
+-- A.hs-boot2
+data A
+
+-- B.hs-boot
+import {-# SOURCE hs-boot2 #-} A
+
+-- A.hs-boot
+import {-# SOURCE hs-boot #-} B
+\end{verbatim}
+
+\paragraph{Explicit or implicit reexports} One annoying property of
+this proposal is that, looking at the \verb|exposed-modules| list, it is
+not immediately clear what source files one would expect to find in the
+current package. It's not obvious what the proper way to go about doing
+this is.
+
+\paragraph{Better syntax for SOURCE} If we enshrine the SOURCE import
+as a way of solving Backpack ordering problems, it would be nice to have
+some better syntax for it. One possibility is:
+
+\begin{verbatim}
+abstract import Data.Foo
+\end{verbatim}
+
+which makes it clear that this module is pluggable, typechecking against
+a signature. Note that this only indicates how type checking should be
+done: when actually compiling the module we will compile against the
+interface file for the true implementation of the module.
+
+It's worth noting that the SOURCE annotation was originally made a
+pragma because, in principle, it should have been possible to compile
+some recursive modules without needing the hs-boot file at all. But if
+we're moving towards boot files as signatures, this concern is less
+relevant.
+
+\section{Type classes and type families}
+
+\subsection{Background}
+
+Before we talk about how to support type classes in Backpack, it's first
+worth talking about what we are trying to achieve in the design. Most
+would agree that \emph{type safety} is the cardinal law that should be
+preserved (in the sense that segfaults should not be possible), but
+there are many instances of ``bad behavior'' (top level mutable state,
+weakening of abstraction guarantees, ambiguous instance resolution, etc)
+which various Haskellers may disagree on the necessity of ruling out.
+
+With this in mind, it is worth summarizing what kind of guarantees are
+presently given by GHC with regards to type classes and type families,
+as well as characterizing the \emph{cultural} expectations of the
+Haskell community.
+
+\paragraph{Type classes} When discussing type class systems, there are
+several properties that one may talk about.
+A set of instances is \emph{confluent} if, no matter what order
+constraint solving is performed, GHC will terminate with a canonical set
+of constraints that must be satisfied for any given use of a type class.
+In other words, confluence says that we won't conclude that a program
+doesn't type check just because we swapped in a different constraint
+solving algorithm.
+
+Confluence's closely related twin is \emph{coherence} (defined in ``Type
+classes: exploring the design space''). This property states that
+``every different valid typing derivation of a program leads to a
+resulting program that has the same dynamic semantics.'' Why could
+differing typing derivations result in different dynamic semantics? The
+answer is that context reduction, which picks out type class instances,
+elaborates into concrete choices of dictionaries in the generated code.
+Confluence is a prerequisite for coherence, since one
+can hardly talk about the dynamic semantics of a program that doesn't
+type check.
+
+In the vernacular, confluence and coherence are often incorrectly used
+to refer to another related property: \emph{global uniqueness of instances},
+which states that in a fully compiled program, for any type, there is at most one
+instance resolution for a given type class. Languages with local type
+class instances such as Scala generally do not have this property, and
+this assumption is frequently used for abstraction.
+
+So, what properties does GHC enforce, in practice?
+In the absence of any type system extensions, GHC employs a set of
+rules (described in ``Exploring the design space'') to ensure that type
+class resolution is confluent and coherent. Intuitively, it achieves
+this by having a very simple constraint solving algorithm (generate
+wanted constraints and solve wanted constraints) and then requiring the
+set of instances to be \emph{nonoverlapping}, ensuring there is only
+ever one way to solve a wanted constraint. Overlap is a
+more stringent restriction than either confluence or coherence, and
+via the \verb|OverlappingInstances| and \verb|IncoherentInstances| extensions, GHC
+allows a user to relax this restriction ``if they know what they're doing.''
+
+Surprisingly, however, GHC does \emph{not} enforce global uniqueness of
+instances. Imported instances are not checked for overlap until we
+attempt to use them for instance resolution. Consider the following program:
+
+\begin{verbatim}
+-- T.hs
+data T = T
+-- A.hs
+import T
+instance Eq T where
+-- B.hs
+import T
+instance Eq T where
+-- C.hs
+import A
+import B
+\end{verbatim}
+
+When compiled with one-shot compilation, \verb|C| will not report
+overlapping instances unless we actually attempt to use the \verb|Eq|
+instance in C.\footnote{When using batch compilation, GHC reuses the
+ instance database and is actually able to detect the duplicated
+ instance when compiling B. But if you run it again, recompilation
+avoidance skips A, and it finishes compiling! See this bug:
+\url{https://ghc.haskell.org/trac/ghc/ticket/5316}} This is by
+design\footnote{\url{https://ghc.haskell.org/trac/ghc/ticket/2356}}:
+ensuring that there are no overlapping instances requires eagerly
+reading all the interface files a module may depend on.
+
+We might summarize these three properties in the following manner.
+Culturally, the Haskell community expects \emph{global uniqueness of instances}
+to hold: the implicit global database of instances should be
+confluent and coherent. GHC, however, does not enforce uniqueness of
+instances: instead, it merely guarantees that the \emph{subset} of the
+instance database it uses when it compiles any given module is confluent and coherent. GHC does do some
+tests when an instance is declared to see if it would result in overlap
+with visible instances, but the check is by no means
+perfect\footnote{\url{https://ghc.haskell.org/trac/ghc/ticket/9288}};
+truly, \emph{type-class constraint resolution} has the final word. One
+mitigating factor is that in the absence of \emph{orphan instances}, GHC is
+guaranteed to eagerly notice when the instance database has overlap.\footnote{Assuming that the instance declaration checks actually worked\ldots}
+
+Clearly, the fact that GHC's lazy behavior is surprising to most
+Haskellers means that the lazy check is mostly good enough: a user
+is likely to discover overlapping instances one way or another.
+However, it is relatively simple to construct example programs which
+violate global uniqueness of instances in an observable way:
+
+\begin{verbatim}
+-- A.hs
+module A where
+data U = X | Y deriving (Eq, Show)
+
+-- B.hs
+module B where
+import Data.Set
+import A
+
+instance Ord U where
+compare X X = EQ
+compare X Y = LT
+compare Y X = GT
+compare Y Y = EQ
+
+ins :: U -> Set U -> Set U
+ins = insert
+
+-- C.hs
+module C where
+import Data.Set
+import A
+
+instance Ord U where
+compare X X = EQ
+compare X Y = GT
+compare Y X = LT
+compare Y Y = EQ
+
+ins' :: U -> Set U -> Set U
+ins' = insert
+
+-- D.hs
+module Main where
+import Data.Set
+import A
+import B
+import C
+
+test :: Set U
+test = ins' X $ ins X $ ins Y $ empty
+
+main :: IO ()
+main = print test
+
+-- OUTPUT
+$ ghc -Wall -XSafe -fforce-recomp --make D.hs
+[1 of 4] Compiling A ( A.hs, A.o )
+[2 of 4] Compiling B ( B.hs, B.o )
+
+B.hs:5:10: Warning: Orphan instance: instance [safe] Ord U
+[3 of 4] Compiling C ( C.hs, C.o )
+
+C.hs:5:10: Warning: Orphan instance: instance [safe] Ord U
+[4 of 4] Compiling Main ( D.hs, D.o )
+Linking D ...
+$ ./D
+fromList [X,Y,X]
+\end{verbatim}
+
+Locally, all type class resolution was coherent: in the subset of
+instances each module had visible, type class resolution could be done
+unambiguously. Furthermore, the types of \verb|ins| and \verb|ins'|
+discharge type class resolution, so that in \verb|D| when the database
+is now overlapping, no resolution occurs, so the error is never found.
+
+It is easy to dismiss this example as an implementation wart in GHC, and
+continue pretending that global uniqueness of instances holds. However,
+the problem with \emph{global uniqueness of instances} is that they are
+inherently nonmodular: you might find yourself unable to compose two
+components because they accidentally defined the same type class
+instance, even though these instances are plumbed deep in the
+implementation details of the components.
+
+As it turns out, there is already another feature in Haskell which
+must enforce global uniqueness, to prevent segfaults.
+We now turn to type classes' close cousin: type families.
+
+\paragraph{Type families} With type families, confluence is the primary
+property of interest. (Coherence is not of much interest because type
+families are elaborated into coercions, which don't have any
+computational content.) Rather than considering what the set of
+constraints we reduce to, confluence for type families considers the
+reduction of type families. The overlap checks for type families
+can be quite sophisticated, especially in the case of closed type
+families.
+
+Unlike type classes, however, GHC \emph{does} check the non-overlap
+of type families eagerly. The analogous program does \emph{not} type check:
+
+\begin{verbatim}
+-- F.hs
+type family F a :: *
+-- A.hs
+import F
+type instance F Bool = Int
+-- B.hs
+import F
+type instance F Bool = Bool
+-- C.hs
+import A
+import B
+\end{verbatim}
+
+The reason is that it is \emph{unsound} to ever allow any overlap
+(unlike in the case of type classes where it just leads to incoherence.)
+Thus, whereas one might imagine dropping the global uniqueness of instances
+invariant for type classes, it is absolutely necessary to perform global
+enforcement here. There's no way around it!
+
+\subsection{Local type classes}
+
+Here, we say \textbf{NO} to global uniqueness.
+
+This design is perhaps best discussed in relation to modular type
+classes, which shares many similar properties. Instances are now
+treated as first class objects (in MTCs, they are simply modules)---we
+may explicitly hide or include instances for type class resolution (in
+MTCs, this is done via the \verb|using| top-level declaration). This is
+essentially what was sketched in Section 5 of the original Backpack
+paper. As a simple example:
+
+\begin{verbatim}
+package p where
+ A :: [ data T = T ]
+ B = [ import A; instance Eq T where ... ]
+
+package q where
+ A = [ data T = T; instance Eq T where ... ]
+ include p
+\end{verbatim}
+
+Here, \verb|B| does not see the extra instance declared by \verb|A|,
+because it was thinned from its signature of \verb|A| (and thus never
+declared canonical.) To declare an instance without making it
+canonical, it must be placed in a separate (unimported) module.
+
+Like modular type classes, Backpack does not give rise to incoherence,
+because instance visibility can only be changed at the top level module
+language, where it is already considered best practice to provide
+explicit signatures. Here is the example used in the Modular Type
+Classes paper to demonstrate the problem:
+
+\begin{verbatim}
+structure A = using EqInt1 in
+ struct ...fun f x = eq(x,x)... end
+structure B = using EqInt2 in
+ struct ...val y = A.f(3)... end
+\end{verbatim}
+
+Is the type of \verb|f| \verb|int -> bool|, or does it have a type-class
+constraint? Because type checking proceeds over the entire program, ML
+could hypothetically pick either. However, ported to Haskell, the
+example looks like this:
+
+\begin{verbatim}
+EqInt1 :: [ instance Eq Int ]
+EqInt2 :: [ instance Eq Int ]
+A = [
+ import EqInt1
+ f x = x == x
+]
+B = [
+ import EqInt2
+ import A hiding (instance Eq Int)
+ y = f 3
+]
+\end{verbatim}
+
+There may be ambiguity, yes, but it can be easily resolved by the
+addition of a top-level type signature to \verb|f|, which is considered
+best-practice anyway. Additionally, Haskell users are trained to expect
+a particular inference for \verb|f| in any case (the polymorphic one).
+
+Here is another example which might be considered surprising:
+
+\begin{verbatim}
+package p where
+ A :: [ data T = T ]
+ B :: [ data T = T ]
+ C = [
+ import qualified A
+ import qualified B
+ instance Show A.T where show T = "A"
+ instance Show B.T where show T = "B"
+ x :: String
+ x = show A.T ++ show B.T
+ ]
+\end{verbatim}
+
+In the original Backpack paper, it was implied that module \verb|C|
+should not type check if \verb|A.T = B.T| (failing at link time).
+However, if we set aside, for a moment, the issue that anyone who
+imports \verb|C| in such a context will now have overlapping instances,
+there is no reason in principle why the module itself should be
+problematic. Here is the example in MTCs, which I have good word from
+Derek does type check.
+
+\begin{verbatim}
+signature SIG = sig
+ type t
+ val mk : t
+end
+signature SHOW = sig
+ type t
+ val show : t -> string
+end
+functor Example (A : SIG) (B : SIG) =
+ let structure ShowA : SHOW = struct
+ type t = A.t
+ fun show _ = "A"
+ end in
+ let structure ShowB : SHOW = struct
+ type t = B.t
+ fun show _ = "B"
+ end in
+ using ShowA, ShowB in
+ struct
+ val x = show A.mk ++ show B.mk
+ end : sig val x : string end
+\end{verbatim}
+
+The moral of the story is, even though in a later context the instances
+are overlapping, inside the functor, the type-class resolution is unambiguous
+and should be done (so \verb|x = "AB"|).
+
+Up until this point, we've argued why MTCs and this Backpack design are similar.
+However, there is an important sociological difference between modular type-classes
+and this proposed scheme for Backpack. In the presentation ``Why Applicative
+Functors Matter'', Derek mentions the canonical example of defining a set:
+
+\begin{verbatim}
+signature ORD = sig type t; val cmp : t -> t -> bool end
+signature SET = sig type t; type elem;
+ val empty : t;
+ val insert : elem -> t -> t ...
+end
+functor MkSet (X : ORD) :> SET where type elem = X.t
+ = struct ... end
+\end{verbatim}
+
+This is actually very different from how sets tend to be defined in
+Haskell today. If we directly encoded this in Backpack, it would
+look like this:
+
+\begin{verbatim}
+package mk-set where
+ X :: [
+ data T
+ cmp :: T -> T-> Bool
+ ]
+ Set :: [
+ data Set
+ empty :: Set
+ insert :: T -> Set -> Set
+ ]
+ Set = [
+ import X
+ ...
+ ]
+\end{verbatim}
+
+It's also informative to consider how MTCs would encode set as it is written
+today in Haskell:
+
+\begin{verbatim}
+signature ORD = sig type t; val cmp : t -> t -> bool end
+signature SET = sig type 'a t;
+ val empty : 'a t;
+ val insert : (X : ORD) => X.t -> X.t t -> X.t t
+end
+struct MkSet :> SET = struct ... end
+\end{verbatim}
+
+Here, it is clear to see that while functor instantiation occurs for
+implementation, it is not occurring for types. This is a big limitation
+with the Haskell approach, and it explains why Haskellers, in practice,
+find global uniqueness of instances so desirable.
+
+Implementation-wise, this requires some subtle modifications to how we
+do type class resolution. Type checking of indefinite modules works as
+before, but when we go to actually compile them against explicit
+implementations, we need to ``forget'' that two types are equal when
+doing instance resolution. This could probably be implemented by
+associating type class instances with the original name that was
+utilized when typechecking, so that we can resolve ambiguous matches
+against types which have the same original name now that we are
+compiling.
+
+As we've mentioned previously, this strategy is unsound for type families.
+
+\subsection{Globally unique}
+
+Here, we say \textbf{YES} to global uniqueness.
+
+When we require the global uniqueness of instances (either because
+that's the type class design we chose, or because we're considering
+the problem of type families), we will need to reject declarations like the
+one cited above when \verb|A.T = B.T|:
+
+\begin{verbatim}
+A :: [ data T ]
+B :: [ data T ]
+C :: [
+ import qualified A
+ import qualified B
+ instance Show A.T where show T = "A"
+ instance Show B.T where show T = "B"
+]
+\end{verbatim}
+
+The paper mentions that a link-time check is sufficient to prevent this
+case from arising. While in the previous section, we've argued why this
+is actually unnecessary when local instances are allowed, the link-time
+check is a good match in the case of global instances, because any
+instance \emph{must} be declared in the signature. The scheme proceeds
+as follows: when some instances are typechecked initially, we type check
+them as if all of the variable module identities were distinct. Then, when
+we perform linking (we \verb|include| or we unify some module
+identities), we check again to see if we've discovered some instance
+overlap. This linking check is akin to the eager check that is
+performed today for type families; it would need to be implemented for
+type classes as well; however, there is a twist: we are \emph{redoing}
+the overlap check now that some identities have been unified.
+
+As an implementation trick, one could defer the check until \verb|C|
+is compiled, keeping in line with GHC's lazy ``don't check for overlap
+until the use site.'' (Once again, unsound for type families.)
+
+\paragraph{What about module inequalities?} An older proposal was for
+signatures to contain ``module inequalities'', i.e., assertions that two
+modules are not equal. (Technically: we need to be able to apply this
+assertion to $\beta$ module variables, since \verb|A != B| while
+\verb|A.T = B.T|). Currently, Edward thinks that module inequalities
+are of only marginal utility with local instances (i.e., not enough to
+justify the implementation cost) and not useful at all in the world of
+global instances!
+
+With local instances, module inequalities could be useful to statically
+rule out examples like \verb|show A.T ++ show B.T|. Because such uses
+are not necessarily reflected in the signature, it would be a violation
+of separate module development to try to divine the constraint from the
+implementation itself. I claim this is of limited utility, however, because,
+as we mentioned earlier, we can compile these ``incoherent'' modules perfectly
+coherently. With global instances, all instances must be in the signature, so
+while it might be aesthetically displeasing to have the signature impose
+extra restrictions on linking identities, we can carry this out without
+violating the linking restriction.
+
+\section{Bits and bobs}
+
+\subsection{Abstract type synonyms}
+
+In Paper Backpack, abstract type synonyms are not permitted, because GHC doesn't
+understand how to deal with them. The purpose of this section is to describe
+one particular nastiness of abstract type synonyms, by way of the occurs check:
+
+\begin{verbatim}
+A :: [ type T ]
+B :: [ import qualified A; type T = [A.T] ]
+\end{verbatim}
+
+At this point, it is illegal for \verb|A = B|, otherwise this type synonym would
+fail the occurs check. This seems like pretty bad news, since every instance
+of the occurs check in the type-checker could constitute a module inequality.
+
+\section{Open questions}\label{sec:open-questions}
+
+Here are open problems about the implementation which still require
+hashing out.
+
+\begin{itemize}
+
+ \item In Section~\ref{sec:simplifying-backpack}, we argued that we
+ could implement Backpack without needing a shaping pass. We're
+ pretty certain that this will work for typechecking and
+ compiling fully definite packages with no recursive linking, but
+ in Section~\ref{sec:typechecking-indefinite}, we described some
+ of the prevailing difficulties of supporting signature linking.
+ Renaming is not an insurmountable problem, but backwards flow of
+ shaping information can be, and it is unclear how best to
+ accommodate this. This is probably the most important problem
+ to overcome.
+
+ \item In Section~\ref{sec:installing-indefinite}, a few choices for how to
+ store source code were pitched, however, there is not consensus on which
+ one is best.
+
+ \item What is the impact of the multiplicity of PackageIds on
+ dependency solving in Cabal? Old questions of what to prefer
+ when multiple package-versions are available (Cabal originally
+ only needed to solve this between different versions of the same
+ package, preferring the oldest version), but with signatures,
+ there are more choices. Should there be a complex solver that
+ does all signature solving, or a preprocessing step that puts
+ things back into the original Cabal version. Authors may want
+ to suggest policy for what packages should actually link against
+ signatures (so a crypto library doesn't accidentally link
+ against a null cipher package).
+
+\end{itemize}
+
+\end{document}
diff --git a/docs/backpack/commands-new-new.tex b/docs/backpack/commands-new-new.tex
new file mode 100644
index 000000000000..1f2466e14cb7
--- /dev/null
+++ b/docs/backpack/commands-new-new.tex
@@ -0,0 +1,891 @@
+%!TEX root = paper/paper.tex
+\usepackage{amsmath}
+\usepackage{amssymb}
+\usepackage{amsthm}
+\usepackage{xspace}
+\usepackage{color}
+\usepackage{xifthen}
+\usepackage{graphicx}
+\usepackage{amsbsy}
+\usepackage{mathtools}
+\usepackage{stmaryrd}
+\usepackage{url}
+\usepackage{alltt}
+\usepackage{varwidth}
+% \usepackage{hyperref}
+\usepackage{datetime}
+\usepackage{subfig}
+\usepackage{array}
+\usepackage{multirow}
+\usepackage{xargs}
+\usepackage{marvosym} % for MVAt
+\usepackage{bm} % for blackboard bold semicolon
+
+
+%% HYPERREF COLORS
+\definecolor{darkred}{rgb}{.7,0,0}
+\definecolor{darkgreen}{rgb}{0,.5,0}
+\definecolor{darkblue}{rgb}{0,0,.5}
+% \hypersetup{
+% linktoc=page,
+% colorlinks=true,
+% linkcolor=darkred,
+% citecolor=darkgreen,
+% urlcolor=darkblue,
+% }
+
+% Coloring
+\definecolor{hilite}{rgb}{0.7,0,0}
+\newcommand{\hilite}[1]{\color{hilite}#1\color{black}}
+\definecolor{shade}{rgb}{0.85,0.85,0.85}
+\newcommand{\shade}[1]{\colorbox{shade}{\!\ensuremath{#1}\!}}
+
+% Misc
+\newcommand\evalto{\hookrightarrow}
+\newcommand\elabto{\rightsquigarrow}
+\newcommand\elabtox[1]{\stackrel{#1}\rightsquigarrow}
+\newcommand{\yields}{\uparrow}
+\newcommand\too{\Rightarrow}
+\newcommand{\nil}{\cdot}
+\newcommand{\eps}{\epsilon}
+\newcommand{\Ups}{\Upsilon}
+\newcommand{\avoids}{\mathrel{\#}}
+
+\renewcommand{\vec}[1]{\overline{#1}}
+\newcommand{\rname}[1]{\textsc{#1}}
+\newcommand{\infrule}[3][]{%
+ \vspace{0.5ex}
+ \frac{\begin{array}{@{}c@{}}#2\end{array}}%
+ {\mbox{\ensuremath{#3}}}%
+ \ifthenelse{\isempty{#1}}%
+ {}%
+ % {\hspace{1ex}\rlap{(\rname{#1})}}%
+ {\hspace{1ex}(\rname{#1})}%
+ \vspace{0.5ex}
+}
+\newcommand{\infax}[2][]{\infrule[#1]{}{#2}}
+\newcommand{\andalso}{\hspace{.5cm}}
+\newcommand{\suchthat}{~\mathrm{s.t.}~}
+\newenvironment{notes}%
+ {\vspace{-1.5em}\begin{itemize}\setlength\itemsep{0pt}\small}%
+ {\end{itemize}}
+\newcommand{\macrodef}{\mathbin{\overset{\mathrm{def}}{=}}}
+\newcommand{\macroiff}{\mathbin{\overset{\mathrm{def}}{\Leftrightarrow}}}
+
+
+\newcommand{\ttt}[1]{\text{\tt #1}}
+\newcommand{\ttul}{\texttt{\char 95}}
+\newcommand{\ttcc}{\texttt{:\!:}}
+\newcommand{\ttlb}{{\tt {\char '173}}}
+\newcommand{\ttrb}{{\tt {\char '175}}}
+\newcommand{\tsf}[1]{\textsf{#1}}
+
+% \newcommand{\secref}[1]{\S\ref{sec:#1}}
+% \newcommand{\figref}[1]{Figure~\ref{fig:#1}}
+\newcommand{\marginnote}[1]{\marginpar[$\ast$ {\small #1} $\ast$]%
+ {$\ast$ {\small #1} $\ast$}}
+\newcommand{\hschange}{\marginnote{!Haskell}}
+\newcommand{\TODO}{\emph{TODO}\marginnote{TODO}}
+\newcommand{\parheader}[1]{\textbf{#1}\quad}
+
+\newcommand{\file}{\ensuremath{\mathit{file}}}
+\newcommand{\mapnil}{~\mathord{\not\mapsto}}
+
+\newcommand{\Ckey}[1]{\textbf{\textsf{#1}}}
+\newcommand{\Cent}[1]{\texttt{#1}}
+% \newcommand{\Cmod}[1]{\texttt{[#1]}}
+% \newcommand{\Csig}[1]{\texttt{[\ttcc{}#1]}}
+\newcommand{\Cmod}[1]{=\texttt{[#1]}}
+\newcommand{\Csig}[1]{~\ttcc{}~\texttt{[#1]}}
+\newcommand{\Cpath}[1]{\ensuremath{\mathsf{#1}}}
+\newcommand{\Cvar}[1]{\ensuremath{\mathsf{#1}}}
+\newcommand{\Ccb}[1]{\text{\ttlb} {#1} \text{\ttrb}}
+\newcommand{\Cpkg}[1]{\texttt{#1}}
+\newcommand{\Cmv}[1]{\ensuremath{\langle #1 \rangle}}
+\newcommand{\Cto}[2]{#1 \mapsto #2}
+\newcommand{\Ctoo}[2]{\Cpath{#1} \mapsto \Cpath{#2}}
+\newcommand{\Crm}[1]{#1 \mapnil}
+\newcommand{\Crmm}[1]{\Cpath{#1} \mapnil}
+\newcommand{\Cthin}[1]{\ensuremath{\langle \Ckey{only}~#1 \rangle}}
+\newcommand{\Cthinn}[1]{\ensuremath{\langle \Ckey{only}~\Cpath{#1} \rangle}}
+\newcommand{\Cinc}[1]{\Ckey{include}~{#1}}
+\newcommand{\Cincc}[1]{\Ckey{include}~\Cpkg{#1}}
+\newcommand{\Cshar}[2]{~\Ckey{where}~{#1} \equiv {#2}}
+\newcommand{\Csharr}[2]{~\Ckey{where}~\Cpath{#1} \equiv \Cpath{#2}}
+\newcommand{\Ctshar}[2]{~\Ckey{where}~{#1} \equiv {#2}}
+\newcommand{\Ctsharr}[3]{~\Ckey{where}~\Cpath{#1}.\Cent{#3} \equiv \Cpath{#2}.\Cent{#3}}
+\newcommand{\Cbinds}[1]{\left\{\!\begin{array}{l} #1 \end{array}\!\right\}}
+\newcommand{\Cbindsp}[1]{\left(\!\begin{array}{l} #1 \end{array}\!\right)}
+\newcommand{\Cpkgs}[1]{\[\begin{array}{l} #1\end{array}\]}
+\newcommand{\Cpkgsl}[1]{\noindent\ensuremath{\begin{array}{@{}l} #1\end{array}}}
+\newcommand{\Ccomment}[1]{\ttt{\emph{--~#1}}}
+\newcommand{\Cimp}[1]{\Ckey{import}~\Cpkg{#1}}
+\newcommand{\Cimpas}[2]{\Ckey{import}~\Cpkg{#1}~\Ckey{as}~\Cvar{#2}}
+
+\newcommand{\Ctbinds}[1]{\left\{\!\vrule width 0.6pt \begin{array}{l} #1 \end{array} \vrule width 0.6pt \!\right\}}
+\newcommand{\Ctbindsx}{\left\{\!\vrule width 0.6pt \; \vrule width 0.6pt \!\right\}}
+\newcommand{\Ctbindsxx}{\left\{\!\vrule width 0.6pt \begin{array}{l}\!\!\!\!\\\!\!\!\!\end{array} \vrule width 0.6pt \!\right\}}
+\newcommand{\Ctbindsxxx}{\left\{\!\vrule width 0.6pt \begin{array}{l}\!\!\!\!\\\!\!\!\!\\\!\!\!\!\end{array} \vrule width 0.6pt \!\right\}}
+
+
+\newcommand{\Cpkgdef}[2]{%
+ \ensuremath{
+ \begin{array}{l}
+ \Ckey{package}~\Cpkg{#1}~\Ckey{where}\\
+ \hspace{1em}\begin{array}{l}
+ #2
+ \end{array}
+ \end{array}}}
+\newcommand{\Cpkgdefonly}[3]{%
+ \ensuremath{
+ \begin{array}{l}
+ \Ckey{package}~\Cpkg{#1}\Cvar{(#2)}~\Ckey{where}\\
+ \hspace{1em}\begin{array}{l}
+ #3
+ \end{array}
+ \end{array}}}
+\newcommand{\Ccc}{\mathbin{\ttcc{}}}
+\newcommand{\Cbinmod}[2]{\Cvar{#1} = \texttt{[#2]}}
+\newcommand{\Cbinsig}[2]{\Cvar{#1} \Ccc \texttt{[#2]}}
+\newcommand{\Cinconly}[2]{\Ckey{include}~\Cpkg{#1}\Cvar{(#2)}}
+\newcommand{\Cimponly}[2]{\Ckey{import}~\Cpkg{#1}\Cvar{(#2)}}
+\newcommand{\Cimpmv}[3]{\Ckey{import}~\Cpkg{#1}\langle\Cvar{#2}\mapsto\Cvar{#3}\rangle}
+
+
+
+
+
+\newcommand{\oxb}[1]{\llbracket #1 \rrbracket}
+\newcommand{\coxb}[1]{\{\hspace{-.5ex}| #1 |\hspace{-.5ex}\}}
+\newcommand{\coxbv}[1]{\coxb{\vec{#1}}}
+\newcommand{\angb}[1]{\ensuremath{\boldsymbol\langle #1 \boldsymbol\rangle}\xspace}
+\newcommand{\angbv}[1]{\angb{\vec{#1}}}
+\newcommand{\aoxbl}{\ensuremath{\boldsymbol\langle\hspace{-.5ex}|}}
+\newcommand{\aoxbr}{\ensuremath{|\hspace{-.5ex}\boldsymbol\rangle}\xspace}
+\newcommand{\aoxb}[1]{\ensuremath{\aoxbl{#1}\aoxbr}}
+\newcommand{\aoxbv}[1]{\aoxb{\vec{#1}}}
+\newcommand{\poxb}[1]{\ensuremath{%
+ (\hspace{-.5ex}|%
+ #1%
+ |\hspace{-.5ex})}\xspace}
+\newcommand{\stof}[1]{{#1}^{\star}}
+% \newcommand{\stof}[1]{\ensuremath{\underline{#1}}}
+\newcommand{\sh}[1]{\ensuremath{\tilde{#1}}}
+
+
+% \newenvironment{code}[1][t]%
+% {\ignorespaces\begin{varwidth}[#1]{\textwidth}\begin{alltt}}%
+% {\end{alltt}\end{varwidth}\ignorespacesafterend}
+% \newenvironment{codel}[1][t]%
+% {\noindent\begin{varwidth}[#1]{\textwidth}\noindent\begin{alltt}}%
+% {\end{alltt}\end{varwidth}\ignorespacesafterend}
+
+
+%% hack for subfloats in subfig -------------
+\makeatletter
+\newbox\sf@box
+\newenvironment{SubFloat}[2][]%
+ {\def\sf@one{#1}%
+ \def\sf@two{#2}%
+ \setbox\sf@box\hbox
+ \bgroup}%
+ {\egroup
+ \ifx\@empty\sf@two\@empty\relax
+ \def\sf@two{\@empty}
+ \fi
+ \ifx\@empty\sf@one\@empty\relax
+ \subfloat[\sf@two]{\box\sf@box}%
+ \else
+ \subfloat[\sf@one][\sf@two]{\box\sf@box}%
+ \fi}
+\makeatother
+%% ------------------------------------------
+
+%% hack for top-aligned tabular cells -------------
+\newsavebox\topalignbox
+\newcolumntype{L}{%
+ >{\begin{lrbox}\topalignbox
+ \rule{0pt}{\ht\strutbox}}
+ l
+ <{\end{lrbox}%
+ \raisebox{\dimexpr-\height+\ht\strutbox\relax}%
+ {\usebox\topalignbox}}}
+\newcolumntype{C}{%
+ >{\begin{lrbox}\topalignbox
+ \rule{0pt}{\ht\strutbox}}
+ c
+ <{\end{lrbox}%
+ \raisebox{\dimexpr-\height+\ht\strutbox\relax}%
+ {\usebox\topalignbox}}}
+\newcolumntype{R}{%
+ >{\begin{lrbox}\topalignbox
+ \rule{0pt}{\ht\strutbox}}
+ r
+ <{\end{lrbox}%
+ \raisebox{\dimexpr-\height+\ht\strutbox\relax}%
+ {\usebox\topalignbox}}}
+%% ------------------------------------------------
+
+\newcommand\syn[1]{\textsf{#1}}
+\newcommand\bsyn[1]{\textsf{\bfseries #1}}
+\newcommand\msyn[1]{\textsf{#1}}
+\newcommand{\cc}{\mathop{::}}
+
+% \newcommand{\Eimp}[1]{\bsyn{import}~{#1}}
+% \newcommand{\Eonly}[2]{#1~\bsyn{only}~{#2}}
+% \newcommand{\Ehide}[1]{~\bsyn{hide}~{#1}}
+% \newcommand{\Enew}[1]{\bsyn{new}~{#1}}
+% \newcommand{\Elocal}[2]{\bsyn{local}~{#1}~\bsyn{in}~{#2}}
+% \newcommand{\Smv}[3]{\Emv[]{#1}{#2}{#3}}
+\newcommand{\Srm}[2]{#1 \mathord{\setminus} #2}
+
+\newcommand{\cpath}{\varrho}
+\newcommand{\fpath}{\rho}
+
+\newcommand{\ie}{\emph{i.e.},\xspace}
+\newcommand{\eg}{\emph{e.g.},~}
+\newcommand{\etal}{\emph{et al.}}
+
+\renewcommand{\P}[1]{\Cpkg{#1}}
+\newcommand{\X}[1]{\Cvar{#1}}
+\newcommand{\E}{\mathcal{E}}
+\newcommand{\C}{\mathcal{C}}
+\newcommand{\M}{\mathcal{M}}
+\newcommand{\B}{\mathcal{B}}
+\newcommand{\R}{\mathcal{R}}
+\newcommand{\K}{\mathcal{K}}
+\renewcommand{\L}{\mathcal{L}}
+\newcommand{\D}{\mathcal{D}}
+
+%%%% NEW
+
+\newdateformat{numericdate}{%
+\THEYEAR.\twodigit{\THEMONTH}.\twodigit{\THEDAY}
+}
+
+% EL DEFNS
+\newcommand{\shal}[1]{\syn{shallow}(#1)}
+\newcommand{\exports}[1]{\syn{exports}(#1)}
+\newcommand{\Slocals}[1]{\syn{locals}(#1)}
+\newcommand{\Slocalsi}[2]{\syn{locals}(#1; #2)}
+\newcommand{\specs}[1]{\syn{specs}(#1)}
+\newcommand{\ELmkespc}[2]{\syn{mkespc}(#1;#2)}
+\newcommand{\Smkeenv}[1]{\syn{mkeenv}(#1)}
+\newcommand{\Smklocaleenv}[2]{\syn{mklocaleenv}(#1;#2)}
+\newcommand{\Smklocaleenvespcs}[1]{\syn{mklocaleenv}(#1)}
+\newcommand{\Smkphnms}[1]{\syn{mkphnms}(#1)}
+\newcommand{\Smkphnm}[1]{\syn{mkphnm}(#1)}
+\newcommand{\Sfilterespc}[2]{\syn{filterespc}(#1;#2)}
+\newcommand{\Sfilterespcs}[2]{\syn{filterespcs}(#1;#2)}
+\newcommand{\Simps}[1]{\syn{imps}(#1)}
+
+
+
+% IL DEFNS
+\newcommand{\dexp}{\mathit{dexp}}
+\newcommand{\fexp}{\mathit{fexp}}
+\newcommand{\tfexp}{\mathit{tfexp}}
+\newcommand{\pexp}{\mathit{pexp}}
+\newcommand{\dtyp}{\mathit{dtyp}}
+\newcommand{\ftyp}{\mathit{ftyp}}
+\newcommand{\hsmod}{\mathit{hsmod}}
+\newcommand{\fenv}{\mathit{fenv}}
+\newcommand{\ILmkmod}[6]{\syn{mkmod}(#1; #2; #3; #4; #5; #6)}
+\newcommand{\ILmkstubs}[3]{\syn{mkstubs}(#1; #2; #3)}
+\newcommand{\Smkstubs}[1]{\syn{mkstubs}(#1)}
+\newcommand{\ILentnames}[1]{\syn{entnames}(#1)}
+\newcommand{\ILmkfenv}[1]{\syn{mkfenv}(#1)}
+\newcommand{\ILmkdtyp}[1]{\syn{mkdtyp}(#1)}
+\newcommand{\ILmkknd}[1]{\syn{mkknd}(#1)}
+\newcommand{\ILmkimpdecl}[2]{\syn{mkimpdecl}(#1;#2)}
+\newcommand{\ILmkimpdecls}[2]{\syn{mkimpdecls}(#1;#2)}
+\newcommand{\ILmkimpspec}[3]{\syn{mkimpspec}(#1;#2;#3)}
+\newcommand{\ILmkentimp}[3]{\syn{mkentimp}(#1;#2;#3)}
+\newcommand{\ILmkentimpp}[1]{\syn{mkentimp}(#1)}
+\newcommand{\ILmkexp}[2]{\syn{mkexp}(#1;#2)}
+\newcommand{\ILmkexpdecl}[2]{\syn{mkexpdecl}(#1;#2)}
+\newcommand{\ILmkespc}[2]{\syn{mkespc}(#1;#2)}
+\newcommand{\ILshal}[1]{\syn{shallow}(#1)}
+\newcommand{\ILexports}[1]{\syn{exports}(#1)}
+\newcommand{\ILdefns}[1]{\syn{defns}(#1)}
+\newcommand{\ILdefnsi}[2]{\syn{defns}(#1;#2)}
+
+% CORE DEFNS
+\newcommand{\Hentref}{\mathit{eref}}
+\newcommand{\Hentimp}{\mathit{import}}
+\newcommand{\Hentexp}{\mathit{export}}
+\newcommand{\Himp}{\mathit{impdecl}}
+\newcommand{\Himpspec}{\mathit{impspec}}
+\newcommand{\Himps}{\mathit{impdecls}}
+\newcommand{\Hexps}{\mathit{expdecl}}
+\newcommand{\Hdef}{\mathit{def}}
+\newcommand{\Hdefs}{\mathit{defs}}
+\newcommand{\Hdecl}{\mathit{decl}}
+\newcommand{\Hdecls}{\mathit{decls}}
+\newcommand{\Heenv}{\mathit{eenv}}
+\newcommand{\Haenv}{\mathit{aenv}}
+% \newcommand{\HIL}[1]{{\scriptstyle\downarrow}#1}
+\newcommand{\HIL}[1]{\check{#1}}
+
+\newcommand{\Hcmp}{\sqsubseteq}
+
+\newcommand{\uexp}{\mathit{uexp}}
+\newcommand{\utyp}{\mathit{utyp}}
+\newcommand{\typ}{\mathit{typ}}
+\newcommand{\knd}{\mathit{knd}}
+\newcommand{\kndstar}{\ttt{*}}
+\newcommand{\kndarr}[2]{#1\ensuremath{\mathbin{\ttt{->}}}#2}
+\newcommand{\kenv}{\mathit{kenv}}
+\newcommand{\phnm}{\mathit{phnm}}
+\newcommand{\spc}{\mathit{dspc}}
+\newcommand{\spcs}{\mathit{dspcs}}
+\newcommand{\espc}{\mathit{espc}}
+\newcommand{\espcs}{\mathit{espcs}}
+\newcommand{\ds}{\mathit{ds}}
+
+\newcommand{\shctx}{\sh{\Xi}_{\syn{ctx}}}
+\newcommand{\shctxsigma}{\sh{\Sigma}_{\syn{ctx}}}
+
+\newcommand{\vdashsh}{\Vdash}
+
+% \newcommand{\vdashghc}{\vdash_{\!\!\mathrm{c}}^{\!\!\mathrm{\scriptscriptstyle EL}}}
+% \newcommand{\vdashghcil}{\vdash_{\!\!\mathrm{c}}^{\!\!\mathrm{\scriptscriptstyle IL}}}
+% \newcommand{\vdashshghc}{\vdashsh_{\!\!\mathrm{c}}^{\!\!\mathrm{\scriptscriptstyle EL}}}
+\newcommand{\vdashghc}{\vdash_{\!\!\mathrm{c}}}
+\newcommand{\vdashghcil}{\vdash_{\!\!\mathrm{c}}^{\!\!\mathrm{\scriptscriptstyle IL}}}
+\newcommand{\vdashshghc}{\vdashsh_{\!\!\mathrm{c}}}
+
+% CORE STUFF
+\newcommandx*{\JCModImp}[5][1=\sh\B, 2=\nu_0, usedefault=@]%
+ {#1;#2 \vdashshghc #3;#4 \elabto #5}
+\newcommandx*{\JIlCModImp}[5][1=\fenv, 2=f_0, usedefault=@]%
+ {#1;#2 \vdashghcil #3;#4 \elabto #5}
+\newcommandx*{\JCSigImp}[5][1=\sh\B, 2=\sh\tau, usedefault=@]%
+ {#1;#2 \vdashshghc #3;#4 \elabto #5}
+
+\newcommandx*{\JCImpDecl}[3][1=\sh\B, usedefault=@]%
+ {#1 \vdashshghc #2 \elabto #3}
+\newcommandx*{\JCImp}[4][1=\sh\B, 2=p, usedefault=@]%
+ {#1;#2 \vdashshghc #3 \elabto #4}
+\newcommandx*{\JIlCImpDecl}[3][1=\fenv, usedefault=@]%
+ {#1 \vdashghcil #2 \elabto #3}
+\newcommandx*{\JIlCImp}[4][1=\fenv, 2=f, usedefault=@]%
+ {#1;#2 \vdashghcil #3 \elabto #4}
+
+\newcommandx*{\JCModExp}[4][1=\nu_0, 2=\Heenv, usedefault=@]%
+ {#1;#2 \vdashshghc #3 \elabto #4}
+\newcommandx*{\JIlCModExp}[4][1=f_0, 2=\HIL\Heenv, usedefault=@]%
+ {#1;#2 \vdashghcil #3 \elabto #4}
+
+\newcommandx*{\JCModDef}[5][1=\Psi, 2=\nu_0, 3=\Heenv, usedefault=@]%
+ {#1; #2; #3 \vdashghcil #4 : #5}
+\newcommandx*{\JIlCModDef}[5][1=\fenv, 2=f_0, 3=\HIL\Heenv, usedefault=@]%
+ {#1; #2; #3 \vdashghcil #4 : #5}
+\newcommandx*{\JCSigDecl}[5][1=\Psi, 2=\sh\tau, 3=\Heenv, usedefault=@]%
+ {#1; #2; #3 \vdashghcil #4 : #5}
+
+\newcommandx*{\JCExp}[6][1=\sh\Psi, 2=\nu_0, 3=\Hdefs, 4=\Heenv, usedefault=@]%
+ {#1;#2;#3;#4 \vdashshghc #5 \elabto #6}
+\newcommandx*{\JIlCExp}[4][1=f_0, 2=\HIL\Heenv, usedefault=@]%
+ {#1;#2 \vdashghcil #3 \elabto #4}
+
+\newcommandx*{\JCRefExp}[7][1=\sh\Psi, 2=\nu_0, 3=\Hdefs, 4=\Heenv, usedefault=@]%
+ {#1;#2;#3;#4 \vdashshghc #5 \elabto #6:#7}
+\newcommandx*{\JIlCRefExp}[7][1=\fenv, 2=f_0, 3=\HIL\Hdefs, 4=\HIL\Heenv, usedefault=@]%
+ {#1;#2;#3;#4 \vdashghcil #5 \elabto #6:#7}
+
+\newcommandx*{\JCMod}[4][1=\Gamma, 2=\nu_0, usedefault=@]%
+ {#1; #2 \vdashghc #3 : #4}
+\newcommandx*{\JIlCMod}[3][1=\fenv, usedefault=@]%
+ {#1 \vdashghcil #2 : #3}
+\newcommandx*{\JCSig}[5][1=\Gamma, 2=\sh\tau, usedefault=@]%
+ {#1; #2 \vdashghc #3 \elabto #4;#5}
+\newcommandx*{\JCShSig}[5][1=\Gamma, 2=\sh\tau, usedefault=@]%
+ {#1; #2 \vdashghc #3 \elabto #4;#5}
+\newcommandx*{\JCModElab}[5][1=\Gamma, 2=\nu_0, usedefault=@]%
+ % {#1; #2 \vdashghc #3 : #4 \elabto #5}
+ {#1; #2 \vdashghc #3 : #4 \;\shade{\elabto #5}}
+
+\newcommandx*{\JCWfEenv}[2][1=\Haenv, usedefault=@]%
+ {#1 \vdashshghc #2~\syn{wf}}
+\newcommandx*{\JCWfEenvMap}[2][1=\Haenv, usedefault=@]%
+ {#1 \vdashshghc #2~\syn{wf}}
+\newcommandx*{\JIlCWfEenv}[2][1=\HIL\Haenv, usedefault=@]%
+ {#1 \vdashghcil #2~\syn{wf}}
+\newcommandx*{\JIlCWfEenvMap}[2][1=\HIL\Haenv, usedefault=@]%
+ {#1 \vdashghcil #2~\syn{wf}}
+
+\newcommandx*{\JIlTfexp}[3][1=\fenv, 2=f_0, usedefault=@]%
+ {#1; #2 \vdash #3}
+
+
+
+ % IL STUFF
+
+\newcommandx*{\JIlWf}[2][1=\fenv, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JIlKnd}[4][1=\fenv, 2=\kenv, usedefault=@]%
+ {#1;#2 \vdashghcil #3 \mathrel{\cc} #4}
+% \newcommandx*{\JIlSub}[4][1=\fenv, 2=f, usedefault=@]%
+% {#1;#2 \vdash #3 \le #4}
+\newcommandx*{\JIlSub}[2][usedefault=@]%
+ {\vdash #1 \le #2}
+\newcommandx*{\JIlMerge}[3][usedefault=@]%
+ {\vdash #1 \oplus #2 \Rightarrow #3}
+
+\newcommandx*{\JIlDexp}[2][1=\fenv, usedefault=@]%
+ {#1 \vdash #2}
+\newcommandx*{\JIlDexpTyp}[3][1=\fenv, usedefault=@]%
+ {#1 \vdash #2 : #3}
+
+\newcommandx*{\JIlWfFenv}[2][1=\nil, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JIlWfFtyp}[2][1=\fenv, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JIlWfSpc}[2][1=\fenv, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JIlWfESpc}[2][1=\fenv, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JIlWfSig}[2][1=\fenv, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JIlWfFtypSpecs}[2][1=\fenv, usedefault=@]%
+ {#1 \vdash #2 ~\syn{specs-wf}}
+\newcommandx*{\JIlWfFtypExps}[2][1=\fenv, usedefault=@]%
+ {#1 \vdash #2 ~\syn{exports-wf}}
+\newcommandx*{\JIlWfFenvDeps}[2][1=\fenv, usedefault=@]%
+ {#1 \vdash #2 ~\syn{deps-wf}}
+
+% WF TYPE STUFF IN EL
+\newcommandx*{\JPkgValid}[1]%
+ {\vdash #1 ~\syn{pkg-valid}}
+\newcommandx*{\JWfPkgCtx}[1][1=\Delta, usedefault=@]%
+ {\vdash #1 ~\syn{wf}}
+\newcommandx*{\JWfPhCtx}[2][1=\nil, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JWfModTyp}[2][1=\Psi, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JWfModTypPol}[3][1=\Psi, usedefault=@]%
+ {#1 \vdash #2^{#3} ~\syn{wf}}
+\newcommandx*{\JWfLogSig}[2][1=\Psi, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JWfSpc}[2][1=\Psi, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JWfESpc}[2][1=\Psi, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JWfSig}[2][1=\nil, usedefault=@]%
+ {#1 \vdash #2 ~\syn{wf}}
+\newcommandx*{\JWfModTypSpecs}[2][1=\Psi, usedefault=@]%
+ {#1 \vdash #2 ~\syn{specs-wf}}
+\newcommandx*{\JWfModTypPolSpecs}[3][1=\Psi, usedefault=@]%
+ {#1 \vdash #2^{#3} ~\syn{specs-wf}}
+\newcommandx*{\JWfModTypExps}[2][1=\Psi, usedefault=@]%
+ {#1 \vdash #2 ~\syn{exports-wf}}
+\newcommandx*{\JWfPhCtxDeps}[2][1=\Psi, usedefault=@]%
+ {#1 \vdash #2 ~\syn{deps-wf}}
+\newcommandx*{\JWfPhCtxDepsOne}[4][1=\Psi, usedefault=@]%
+ {#1 \vdash \styp{#2}{#3}{#4} ~\syn{deps-wf}}
+
+% WF SHAPE STUFF IN EL
+\newcommandx*{\JWfShPhCtx}[2][1=\nil, usedefault=@]%
+ {#1 \vdashsh #2 ~\syn{wf}}
+\newcommandx*{\JWfModSh}[2][1=\sh\Psi, usedefault=@]%
+ {#1 \vdashsh #2 ~\syn{wf}}
+\newcommandx*{\JWfModShPol}[3][1=\sh\Psi, usedefault=@]%
+ {#1 \vdashsh #2^{#3} ~\syn{wf}}
+\newcommandx*{\JWfShLogSig}[2][1=\sh\Psi, usedefault=@]%
+ {#1 \vdashsh #2 ~\syn{wf}}
+\newcommandx*{\JWfShSpc}[2][1=\sh\Psi, usedefault=@]%
+ {#1 \vdashsh #2 ~\syn{wf}}
+\newcommandx*{\JWfShESpc}[2][1=\sh\Psi, usedefault=@]%
+ {#1 \vdashsh #2 ~\syn{wf}}
+\newcommandx*{\JWfShSig}[2][1=\nil, usedefault=@]%
+ {#1 \vdashsh #2 ~\syn{wf}}
+\newcommandx*{\JWfModShSpecs}[2][1=\sh\Psi, usedefault=@]%
+ {#1 \vdashsh #2 ~\syn{specs-wf}}
+\newcommandx*{\JWfModShPolSpecs}[3][1=\sh\Psi, usedefault=@]%
+ {#1 \vdashsh #2^{#3} ~\syn{specs-wf}}
+\newcommandx*{\JWfModShExps}[2][1=\sh\Psi, usedefault=@]%
+ {#1 \vdashsh #2 ~\syn{exports-wf}}
+\newcommandx*{\JWfEenv}[4][1=\sh\Psi, 2=\nu_0, 3=\Hdefs, usedefault=@]%
+ {#1;#2;#3 \vdashshghc #4 ~\syn{wf}}
+
+\newcommandx*{\JCoreKnd}[4][1=\Psi, 2=\kenv, usedefault=@]%
+ {#1;#2 \vdashghc #3 \mathrel{\cc} #4}
+
+\newcommandx*{\JStampEq}[2]%
+ {\vdash #1 \equiv #2}
+\newcommandx*{\JStampNeq}[2]%
+ {\vdash #1 \not\equiv #2}
+\newcommandx*{\JUnif}[3]%
+ {\syn{unify}(#1 \doteq #2) \elabto #3}
+\newcommandx*{\JUnifM}[2]%
+ {\syn{unify}(#1) \elabto #2}
+
+\newcommandx*{\JModTypWf}[1]%
+ {\vdash #1 ~\syn{wf}}
+
+\newcommandx*{\JModSub}[2]%
+ {\vdash #1 \le #2}
+\newcommandx*{\JModSup}[2]%
+ {\vdash #1 \ge #2}
+\newcommandx*{\JShModSub}[2]%
+ {\vdashsh #1 \le #2}
+
+\newcommandx*{\JModEq}[2]%
+ {\vdash #1 \equiv #2}
+% \newcommandx*{\JCShModEq}[3][3=\C]%
+% {\vdashsh #1 \equiv #2 \mathbin{|} #3}
+
+\newcommandx*{\JETyp}[4][1=\Gamma, 2=\shctxsigma, usedefault=@]%
+ {#1;#2 \vdash #3 : #4}
+\newcommandx*{\JETypElab}[5][1=\Gamma, 2=\shctxsigma, usedefault=@]%
+ {\JETyp[#1][#2]{#3}{#4} \elabto #5}
+\newcommandx*{\JESh}[3][1=\sh\Gamma, usedefault=@]%
+ {#1 \vdashsh #2 \Rightarrow #3}
+
+\newcommandx*{\JBTyp}[5][1=\Delta, 2=\Gamma, 3=\shctx, usedefault=@]%
+ {#1;#2;#3 \vdash #4 : #5}
+\newcommandx*{\JBTypElab}[6][1=\Delta, 2=\Gamma, 3=\shctx, usedefault=@]%
+ % {\JBTyp[#1][#2][#3]{#4}{#5} \elabto #6}
+ {\JBTyp[#1][#2][#3]{#4}{#5} \;\shade{\elabto #6}}
+\newcommandx*{\JBSh}[4][1=\Delta, 2=\sh\Gamma, usedefault=@]%
+ {#1;#2 \vdashsh #3 \Rightarrow #4}
+
+\newcommandx*{\JBVTyp}[4][1=\Delta, 2=\shctx, usedefault=@]%
+ {#1;#2 \vdash #3 : #4}
+\newcommandx*{\JBVTypElab}[5][1=\Delta, 2=\shctx, usedefault=@]%
+ % {\JBVTyp[#1][#2]{#3}{#4} \elabto #5}
+ {\JBVTyp[#1][#2]{#3}{#4} \;\shade{\elabto #5}}
+\newcommandx*{\JBVSh}[4][1=\Delta, usedefault=@]%
+ {#1 \vdashsh #2 \Rightarrow #3;\, #4}
+
+\newcommandx*{\JImp}[3][1=\Gamma, usedefault=@]%
+ {#1 \vdashimp #2 \elabto #3}
+\newcommandx*{\JShImp}[3][1=\sh\Gamma, usedefault=@]%
+ {#1 \vdashshimp #2 \elabto #3}
+
+\newcommandx*{\JGhcMod}[4]%
+ {#1; #2 \vdashghc #3 : #4}
+\newcommandx*{\JShGhcMod}[4]%
+ {#1; #2 \vdashshghc #3 : #4}
+
+\newcommandx*{\JGhcSig}[5]%
+ {#1; #2 \vdashghc #3 \elabto #4;#5}
+\newcommandx*{\JShGhcSig}[5]%
+ {#1; #2 \vdashshghc #3 \elabto #4;#5}
+
+\newcommandx*{\JThin}[3][1=t, usedefault=@]%
+ {\vdash #2 \xrightarrow{~#1~} #3}
+\newcommandx*{\JShThin}[3][1=t, usedefault=@]%
+ {\vdashsh #2 \xrightarrow{~#1~} #3}
+
+\newcommandx*{\JShMatch}[3][1=\nu, usedefault=@]%
+ {#1 \vdash #2 \sqsubseteq #3}
+
+\newcommandx*{\JShTrans}[4]%
+ {\vdash #1 \le_{#2} #3 \elabto #4}
+
+\newcommandx*{\JMerge}[3]%
+ {\vdash #1 + #2 \Rightarrow #3}
+\newcommandx*{\JShMerge}[5]%
+ {\vdashsh #1 + #2 \Rightarrow #3;\, #4;\, #5}
+\newcommandx*{\JShMergeNew}[4]%
+ {\vdashsh #1 + #2 \Rightarrow #3;\, #4}
+\newcommandx*{\JShMergeSimple}[3]%
+ {\vdashsh #1 + #2 \Rightarrow #3}
+
+\newcommandx*{\JDTyp}[3][1=\Delta, usedefault=@]%
+ {#1 \vdash #2 : #3}
+\newcommandx*{\JDTypElab}[4][1=\Delta, usedefault=@]%
+ % {#1 \vdash #2 : #3 \elabto #4}
+ {#1 \vdash #2 : #3 \;\shade{\elabto #4}}
+
+\newcommandx*{\JTTyp}[2][1=\Delta, usedefault=@]%
+ {#1 \vdash #2}
+
+\newcommandx*{\JSound}[3][1=\Psi_\syn{ctx}, usedefault=@]%
+ {#1 \vdash #2 \sim #3}
+
+\newcommandx*{\JSoundOne}[4][1=\Psi, 2=\fenv, usedefault=@]%
+ {\vdash #3 \sim #4}
+% \newcommand{\Smodi}[4]{\ensuremath{\oxb{=#2 \cc #3 \imps #4}^{#1}}}
+\newcommand{\Smodi}[3]{\ensuremath{\oxb{=#2 \cc #3}^{#1}}}
+\newcommand{\Smod}[2]{\Smodi{+}{#1}{#2}}
+\newcommand{\Ssig}[2]{\Smodi{-}{#1}{#2}}
+\newcommand{\Sreq}[2]{\Smodi{?}{#1}{#2}}
+\newcommand{\Shole}[2]{\Smodi{\circ}{#1}{#2}}
+
+\newcommand{\SSmodi}[2]{\ensuremath{\oxb{=#2}^{#1}}}
+\newcommand{\SSmod}[1]{\SSmodi{+}{#1}}
+\newcommand{\SSsig}[1]{\SSmodi{-}{#1}}
+\newcommand{\SSreq}[1]{\SSmodi{?}{#1}}
+\newcommand{\SShole}[1]{\SSmodi{\circ}{#1}}
+
+% \newcommand{\styp}[3]{\oxb{{#1}\cc{#2}}^{#3}}
+\newcommand{\styp}[3]{{#1}{:}{#2}^{#3}}
+\newcommand{\stm}[2]{\styp{#1}{#2}{\scriptscriptstyle+}}
+\newcommand{\sts}[2]{\styp{#1}{#2}{\scriptscriptstyle-}}
+
+% \newcommand{\mtypsep}{[\!]}
+\newcommand{\mtypsep}{\mbox{$\bm{;}$}}
+\newcommand{\mtypsepsp}{\hspace{.3em}}
+\newcommand{\msh}[3]{\aoxb{#1 ~\mtypsep~ #2 ~\mtypsep~ #3}}
+\newcommand{\mtyp}[3]{
+ \aoxb{\mtypsepsp #1 \mtypsepsp\mtypsep\mtypsepsp
+ #2 \mtypsepsp\mtypsep\mtypsepsp
+ #3 \mtypsepsp}}
+\newcommand{\bigmtyp}[3]{\ensuremath{
+ \left\langle\!\vrule \begin{array}{l}
+ #1 ~\mtypsep \\[0pt]
+ #2 ~\mtypsep \\
+ #3
+ \end{array} \vrule\!\right\rangle
+}}
+
+
+\newcommand{\mtypm}[2]{\mtyp{#1}{#2}^{\scriptstyle+}}
+\newcommand{\mtyps}[2]{\mtyp{#1}{#2}^{\scriptstyle-}}
+\newcommand{\bigmtypm}[2]{\bigmtyp{#1}{#2}^{\scriptstyle+}}
+\newcommand{\bigmtyps}[2]{\bigmtyp{#1}{#2}^{\scriptstyle-}}
+
+\newcommand{\mref}{\ensuremath{\mathit{mref}}}
+\newcommand{\selfpath}{\msyn{Local}}
+
+% \newcommand{\Ltyp}[3]{\oxb{#1 \mathbin{\scriptstyle\MVAt} #2}^{#3}}
+% \newcommand{\Ltyp}[2]{\poxb{#1 \mathbin{\scriptstyle\MVAt} #2}}
+\newcommand{\Ltyp}[2]{#1 {\scriptstyle\MVAt} #2}
+
+\newcommand{\Sshape}[1]{\ensuremath{\syn{shape}(#1)}}
+\newcommand{\Srename}[2]{\ensuremath{\syn{rename}(#1;#2)}}
+\newcommand{\Scons}[2]{\ensuremath{\syn{cons}(#1;#2)}}
+\newcommand{\Smkreq}[1]{\ensuremath{\syn{hide}(#1)}}
+\newcommand{\Sfv}[1]{\ensuremath{\syn{fv}(#1)}}
+\newcommand{\Sdom}[1]{\ensuremath{\syn{dom}(#1)}}
+\newcommand{\Srng}[1]{\ensuremath{\syn{rng}(#1)}}
+\newcommand{\Sdomp}[2]{\ensuremath{\syn{dom}_{#1}(#2)}}
+\newcommand{\Sclos}[1]{\ensuremath{\syn{clos}(#1)}}
+\newcommand{\Scloss}[2]{\ensuremath{\syn{clos}_{#1}(#2)}}
+\newcommand{\Snorm}[1]{\ensuremath{\syn{norm}(#1)}}
+\newcommand{\Sident}[1]{\ensuremath{\syn{ident}(#1)}}
+\newcommand{\Snec}[2]{\ensuremath{\syn{nec}(#1; #2)}}
+\newcommand{\Sprovs}[1]{\ensuremath{\syn{provs}(#1)}}
+\newcommand{\Smkstamp}[2]{\ensuremath{\syn{mkident}(#1; #2)}}
+\newcommand{\Sname}[1]{\ensuremath{\syn{name}(#1)}}
+\newcommand{\Snames}[1]{\ensuremath{\syn{names}(#1)}}
+\newcommand{\Sallnames}[1]{\ensuremath{\syn{allnames}(#1)}}
+\newcommand{\Shassubs}[1]{\ensuremath{\syn{hasSubs}(#1)}}
+\newcommand{\Snooverlap}[1]{\ensuremath{\syn{nooverlap}(#1)}}
+\newcommand{\Sreduce}[2]{\ensuremath{\syn{apply}(#1; #2)}}
+\newcommand{\Smkfenv}[1]{\ensuremath{\syn{mkfenv}(#1)}}
+\newcommand{\Svalidspc}[2]{\ensuremath{\syn{validspc}(#1; #2)}}
+\newcommand{\Srepath}[2]{\ensuremath{\syn{repath}(#1; #2)}}
+\newcommand{\Smksigenv}[2]{\ensuremath{\syn{mksigenv}(#1; #2)}}
+\newcommand{\Smksigshenv}[2]{\ensuremath{\syn{mksigshenv}(#1; #2)}}
+\newcommand{\Squalify}[2]{\ensuremath{\syn{qualify}(#1; #2)}}
+\newcommandx*{\Sdepends}[2][1=\Psi, usedefault=@]%
+ {\ensuremath{\syn{depends}_{#1}(#2)}}
+\newcommandx*{\Sdependss}[3][1=\Psi, 2=N, usedefault=@]%
+ {\ensuremath{\syn{depends}_{#1;#2}(#3)}}
+\newcommandx*{\Sdependsss}[4][1=\Psi, 2=V, 3=\theta, usedefault=@]%
+ {\ensuremath{\syn{depends}_{#1;#2;#3}(#4)}}
+\newcommand{\Snormsubst}[2]{\ensuremath{\syn{norm}(#1; #2)}}
+
+% \newcommand{\Smergeable}[2]{\ensuremath{\syn{mergeable}(#1; #2)}}
+\newcommand{\mdef}{\mathrel{\bot}}
+\newcommand{\Smergeable}[2]{\ensuremath{#1 \mdef #2}}
+
+\newcommand{\Sstamp}[1]{\ensuremath{\syn{stamp}(#1)}}
+\newcommand{\Stype}[1]{\ensuremath{\syn{type}(#1)}}
+
+\newcommand{\Strue}{\ensuremath{\syn{true}}}
+\newcommand{\Sfalse}{\ensuremath{\syn{false}}}
+
+\newcommandx*{\refsstar}[2][1=\nu_0, usedefault=@]%
+ {\ensuremath{\syn{refs}^{\star}}_{#1}(#2)}
+
+\renewcommand{\merge}{\boxplus}
+\newcommand{\meet}{\sqcap}
+
+\newcommand{\Shaslocaleenv}[3]{\ensuremath{\syn{haslocaleenv}(#1;#2;#3)}}
+\newcommand{\MTvalidnewmod}[3]{\ensuremath{\syn{validnewmod}(#1;#2;#3)}}
+\newcommand{\Sdisjoint}[1]{\ensuremath{\syn{disjoint}(#1)}}
+\newcommand{\Sconsistent}[1]{\ensuremath{\syn{consistent}(#1)}}
+\newcommand{\Slocmatch}[2]{\ensuremath{\syn{locmatch}(#1;#2)}}
+\newcommand{\Sctxmatch}[2]{\ensuremath{\syn{ctxmatch}(#1;#2)}}
+\newcommand{\Snolocmatch}[2]{\ensuremath{\syn{nolocmatch}(#1;#2)}}
+\newcommand{\Snoctxmatch}[2]{\ensuremath{\syn{noctxmatch}(#1;#2)}}
+\newcommand{\Sislocal}[2]{\ensuremath{\syn{islocal}(#1;#2)}}
+\newcommand{\Slocalespcs}[2]{\ensuremath{\syn{localespcs}(#1;#2)}}
+
+\newcommand{\Cprod}[1]{\syn{productive}(#1)}
+\newcommand{\Cnil}{\nil}
+\newcommand{\id}{\syn{id}}
+
+\newcommand{\nui}{\nu_{\syn{i}}}
+\newcommand{\taui}{\tau_{\syn{i}}}
+\newcommand{\Psii}{\Psi_{\syn{i}}}
+
+\newcommand{\vis}{\ensuremath{\mathsf{\scriptstyle V}}}
+\newcommand{\hid}{\ensuremath{\mathsf{\scriptstyle H}}}
+
+\newcommand{\taum}[1]{\ensuremath{\tau_{#1}^{m_{#1}}}}
+
+\newcommand{\sigmamod}{\sigma_{\syn{m}}}
+\newcommand{\sigmaprov}{\sigma_{\syn{p}}}
+
+\newcommand{\Svalidsubst}[2]{\ensuremath{\syn{validsubst}(#1;#2)}}
+\newcommand{\Salias}[1]{\ensuremath{\syn{alias}(#1)}}
+\newcommand{\Saliases}[1]{\ensuremath{\syn{aliases}(#1)}}
+\newcommand{\Simp}[1]{\ensuremath{\syn{imp}(#1)}}
+\newcommand{\Styp}[1]{\ensuremath{\syn{typ}(#1)}}
+\newcommand{\Spol}[1]{\ensuremath{\syn{pol}(#1)}}
+
+\newcommand{\stoff}{\stof{(-)}}
+\newcommand{\stheta}{\stof\theta}
+
+
+%%%%%%% FOR THE PAPER!
+\newcommand{\secref}[1]{Section~\ref{sec:#1}}
+\newcommand{\figref}[1]{Figure~\ref{fig:#1}}
+
+% typesetting for module/path names
+\newcommand{\mname}[1]{\textsf{#1}}
+\newcommand{\m}[1]{\mname{#1}}
+
+% typesetting for package names
+\newcommand{\pname}[1]{\textsf{#1}}
+
+\newcommand{\kpm}[2]{\angb{\pname{#1}.#2}}
+
+% for core entities
+\newcommand{\code}[1]{\texttt{#1}}
+\newcommand{\core}[1]{\texttt{#1}}
+
+\newcommand{\req}{\bsyn{req}}
+\newcommand{\hiding}[1]{\req~\m{#1}}
+
+\newcommand{\Emod}[1]{\ensuremath{[#1]}}
+\newcommand{\Esig}[1]{\ensuremath{[\cc#1]}}
+\newcommand{\Epkg}[2]{\bsyn{package}~\pname{#1}~\bsyn{where}~{#2}}
+% \newcommand{\Epkgt}[3]{\bsyn{package}~{#1}~\bsyn{only}~{#2}~\bsyn{where}~{#3}}
+\newcommand{\Epkgt}[3]{\bsyn{package}~\pname{#1}~{#2}~\bsyn{where}~{#3}}
+\newcommand{\Einc}[1]{\bsyn{include}~\pname{#1}}
+% \newcommand{\Einct}[2]{\bsyn{include}~{#1}~\bsyn{only}~{#2}}
+% \newcommand{\Einctr}[3]{\bsyn{include}~{#1}~\bsyn{only}~{#2}~{#3}}
+\newcommand{\Einct}[2]{\bsyn{include}~\pname{#1}~(#2)}
+\newcommand{\Eincr}[2]{\bsyn{include}~\pname{#1}~\angb{#2}}
+\newcommand{\Einctr}[3]{\bsyn{include}~\pname{#1}~(#2)~\angb{#3}}
+\newcommand{\Emv}[2]{#1 \mapsto #2}
+\newcommand{\Emvp}[2]{\m{#1} \mapsto \m{#2}}
+\newcommand{\Etr}[3][~]{{#2}{#1}\langle #3 \rangle}
+\newcommand{\Erm}[3][~]{{#2}{#1}\langle #3 \mapnil \rangle}
+\newcommand{\Ethin}[1]{(#1)}
+\newcommand{\Ethinn}[2]{(#1; #2)}
+
+
+% \newcommand{\Pdef}[2]{\ensuremath{\begin{array}{l} \Phead{#1} #2\end{array}}}
+% \newcommand{\Phead}[1]{\bsyn{package}~\pname{#1}~\bsyn{where} \\}
+% \newcommand{\Pbndd}[2]{\hspace{1em}{#1} = {#2} \\}
+% \newcommand{\Pbnd}[2]{\hspace{1em}\mname{#1} = {#2} \\}
+% \newcommand{\Pref}[2]{\hspace{1em}\mname{#1} = \mname{#2} \\}
+% \newcommand{\Pmod}[2]{\hspace{1em}\mname{#1} = [\code{#2}] \\}
+% \newcommand{\Psig}[2]{\hspace{1em}\mname{#1} \cc [\code{#2}] \\}
+\newcommand{\Pdef}[2]{\ensuremath{
+ \begin{array}{@{\hspace{1em}}L@{\;\;}c@{\;\;}l}
+ \multicolumn{3}{@{}l}{\Phead{#1}} \\
+ #2
+ \end{array}
+}}
+\newcommand{\Pdeft}[3]{\ensuremath{
+ \begin{array}{@{\hspace{1em}}L@{\;\;}c@{\;\;}l}
+ \multicolumn{3}{@{}l}{\Pheadt{#1}{#2}} \\
+ #3
+ \end{array}
+}}
+\newcommand{\Phead}[1]{\bsyn{package}~\pname{#1}~\bsyn{where}}
+\newcommand{\Pheadt}[2]{\bsyn{package}~\pname{#1}~(#2)~\bsyn{where}}
+\newcommand{\Pbnd}[2]{#1 &=& #2 \\}
+\newcommand{\Pref}[2]{\mname{#1} &=& \mname{#2} \\}
+\newcommand{\Pmod}[2]{\mname{#1} &=& [\code{#2}] \\}
+\newcommand{\Pmodd}[2]{\mname{#1} &=& #2 \\}
+\newcommand{\Psig}[2]{\mname{#1} &\cc& [\code{#2}] \\}
+\newcommand{\Psigg}[2]{\mname{#1} &\cc& #2 \\}
+\newcommand{\Pmulti}[1]{\multicolumn{3}{@{\hspace{1em}}l} {#1} \\}
+\newcommand{\Pinc}[1]{\Pmulti{\Einc{#1}}}
+\newcommand{\Pinct}[2]{\Pmulti{\Einct{#1}{#2}}}
+\newcommand{\Pincr}[2]{\Pmulti{\Eincr{#1}{#2}}}
+\newcommand{\Pinctr}[3]{\Pmulti{\Einctr{#1}{#2}{#3}}}
+\newcommand{\Pmodbig}[2]{\mname{#1} &=& \left[
+ \begin{codeblock}
+ #2
+ \end{codeblock}
+\right] \\}
+\newcommand{\Psigbig}[2]{\mname{#1} &\cc& \left[
+ \begin{codeblock}
+ #2
+ \end{codeblock}
+\right] \\}
+
+\newcommand{\Mimp}[1]{\msyn{import}~\mname{#1}}
+\newcommand{\Mimpq}[1]{\msyn{import}~\msyn{qualified}~\mname{#1}}
+\newcommand{\Mimpas}[2]{\msyn{import}~\mname{#1}~\msyn{as}~\mname{#2}}
+\newcommand{\Mimpqas}[2]{\msyn{import}~\msyn{qualified}~\mname{#1}~\msyn{as}~\mname{#2}}
+\newcommand{\Mexp}[1]{\msyn{export}~(#1)}
+
+\newcommand{\illtyped}{\hfill ($\times$) \; ill-typed}
+
+\newenvironment{example}[1][LL]%
+ {\ignorespaces \begin{flushleft}\begin{tabular}{@{\hspace{1em}}#1} }%
+ {\end{tabular}\end{flushleft} \ignorespacesafterend}
+
+\newenvironment{counterexample}[1][LL]%
+ {\ignorespaces \begin{flushleft}\begin{tabular}{@{\hspace{1em}}#1} }%
+ {& \text{\illtyped} \end{tabular}\end{flushleft} \ignorespacesafterend}
+
+\newenvironment{codeblock}%
+ {\begin{varwidth}{\textwidth}\begin{alltt}}%
+ {\end{alltt}\end{varwidth}}
+
+\newcommand{\fighead}{\hrule\vspace{1.5ex}}
+\newcommand{\figfoot}{\vspace{1ex}\hrule}
+\newenvironment{myfig}{\fighead\small}{\figfoot}
+
+\newcommand{\Mhead}[2]{\syn{module}~{#1}~\syn{(}{#2}\syn{)}~\syn{where}}
+\newcommand{\Mdef}[3]{\ensuremath{
+ \begin{array}{@{\hspace{1em}}L}
+ \multicolumn{1}{@{}L}{\Mhead{#1}{\core{#2}}} \\
+ #3
+ \end{array}
+}}
+
+\newcommand{\HMstof}[1]{\ensuremath{#1}}
+% \newcommand{\HMstof}[1]{\ensuremath{\lfloor #1 \rfloor}}
+% \newcommand{\HMstof}[1]{\ensuremath{\underline{#1}}}
+% \newcommand{\HMstof}[1]{{#1}^{\star}}
+\newcommand{\HMhead}[2]{\syn{module}~\(\HMstof{#1}\)~\syn{(}{#2}\syn{)}~\syn{where}}
+\newcommand{\HMdef}[3]{\ensuremath{
+ \begin{array}{@{\hspace{1em}}L}
+ \multicolumn{1}{@{}L}{\HMhead{#1}{\core{#2}}} \\
+ #3
+ \end{array}
+}}
+\newcommand{\HMimpas}[3]{%
+ \msyn{import}~\ensuremath{\HMstof{#1}}~%
+ \msyn{as}~\mname{#2}~\msyn{(}\core{#3}\msyn{)}}
+\newcommand{\HMimpqas}[3]{%
+ \msyn{import}~\msyn{qualified}~\ensuremath{\HMstof{#1}}~%
+ \msyn{as}~\mname{#2}~\msyn{(}\core{#3}\msyn{)}}
+
+\newcommand{\stackedenv}[2][c]{\ensuremath{
+ \begin{array}{#1}
+ #2
+ \end{array}
+}}
+
+% \renewcommand{\nil}{\mathsf{nil}}
+\renewcommand{\nil}{\mathrel\emptyset}
+
+% \newcommand{\ee}{\mathit{ee}}
+\newcommand{\ee}{\mathit{dent}}
+
+\renewcommand{\gets}{\mathbin{\coloneqq}}
\ No newline at end of file
diff --git a/docs/backpack/commands-rebindings.tex b/docs/backpack/commands-rebindings.tex
new file mode 100644
index 000000000000..96ad2bb2cc30
--- /dev/null
+++ b/docs/backpack/commands-rebindings.tex
@@ -0,0 +1,57 @@
+
+
+%% hide the full syntax of shapes/types for the paper
+\newcommand{\fullmsh}[3]{\aoxb{#1 ~\mtypsep~ #2 ~\mtypsep~ #3}}
+\newcommand{\fullmtyp}[3]{
+ \aoxb{\mtypsepsp #1 \mtypsepsp\mtypsep\mtypsepsp
+ #2 \mtypsepsp\mtypsep\mtypsepsp
+ #3 \mtypsepsp}}
+\newcommand{\fullbigmtyp}[3]{\ensuremath{
+ \left\langle\!\vrule \begin{array}{l}
+ #1 ~\mtypsep \\[0pt]
+ #2 ~\mtypsep \\
+ #3
+ \end{array} \vrule\!\right\rangle
+}}
+\renewcommand{\msh}[2]{\aoxb{#1 \mtypsepsp\mtypsep\mtypsepsp #2}}
+\renewcommand{\mtyp}[2]{
+ \aoxb{#1 ~\mtypsep~ #2}}
+\newcommand{\mtypstretch}[2]{
+ \left\langle\!\vrule
+ \mtypsepsp #1 \mtypsepsp\mtypsep\mtypsepsp #2 \mtypsepsp
+ \vrule\!\right\rangle
+}
+\renewcommand{\bigmtyp}[2]{\ensuremath{
+ \left\langle\!\vrule \begin{array}{l}
+ #1 ~\mtypsep \\[0pt] #2
+ \end{array} \vrule\!\right\rangle
+}}
+
+
+
+%% change syntax of signatures
+\renewcommand{\Esig}[1]{\ensuremath{\,[#1]}}
+
+\renewcommandx*{\JBVSh}[3][1=\Delta, usedefault=@]%
+ {#1 \vdashsh #2 \Rightarrow #3}
+
+
+% JUDGMENTS
+\renewcommandx*{\JBTypElab}[6][1=\Delta, 2=\Gamma, 3=\shctx, usedefault=@]%
+ % {\JBTyp[#1][#2][#3]{#4}{#5} \elabto #6}
+ {\JBTyp[#1][#2][#3]{#4}{#5} \;\shade{\elabto #6}}
+\renewcommandx*{\JBVTypElab}[5][1=\Delta, 2=\shctx, usedefault=@]%
+ % {\JBVTyp[#1][#2]{#3}{#4} \elabto #5}
+ {\JBVTyp[#1][#2]{#3}{#4} \;\shade{\elabto #5}}
+\renewcommandx*{\JDTypElab}[4][1=\Delta, usedefault=@]%
+ % {#1 \vdash #2 : #3 \elabto #4}
+ {#1 \vdash #2 : #3 \;\shade{\elabto #4}}
+\renewcommandx*{\JCModElab}[5][1=\Gamma, 2=\nu_0, usedefault=@]%
+ % {#1; #2 \vdashghc #3 : #4 \elabto #5}
+ {#1; #2 \vdashghc #3 : #4 \;\shade{\elabto #5}}
+
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "paper"
+%%% End:
diff --git a/docs/backpack/diagrams.pdf b/docs/backpack/diagrams.pdf
new file mode 100644
index 000000000000..a50916b23469
Binary files /dev/null and b/docs/backpack/diagrams.pdf differ
diff --git a/docs/backpack/diagrams.xoj b/docs/backpack/diagrams.xoj
new file mode 100644
index 000000000000..acec8d02de7f
Binary files /dev/null and b/docs/backpack/diagrams.xoj differ
diff --git a/docs/backpack/pkgdb.png b/docs/backpack/pkgdb.png
new file mode 100644
index 000000000000..9779444b42e6
Binary files /dev/null and b/docs/backpack/pkgdb.png differ
diff --git a/docs/coding-style.html b/docs/coding-style.html
index 37aaf8dd46b9..6be9263d974c 100644
--- a/docs/coding-style.html
+++ b/docs/coding-style.html
@@ -324,7 +324,7 @@
-
- The GHC Commentary - Other Sources of Wisdom
-
-
-
-
Other Sources of Wisdom
-
- Believe it or not, but there are other people besides you who are
- masochistic enough to study the innards of the beast. Some of the have
- been kind (or cruel?) enough to share their insights with us. Here is a
- probably incomplete list:
-
-
-
-
The STG
- Survival Sheet has -- according to its header -- been written by
- `a poor wee soul',1 which
- probably has been pushed into the torments of madness by the very
- act of contemplating the inner workings of the STG runtime system.
- This document discusses GHC's runtime system with a focus on
- support for parallel processing (aka GUM).
-
-
Andrew Tolmach has defined
- an external
- representation of
- GHC's Core language and also implemented a GHC pass
- that emits the intermediate form into .hcr files. The
- option -fext-core triggers GHC to emit Core code after
- optimisation; in addition, -fno-code is often used to
- stop compilation after Core has been emitted.
-
-
-
-
-
-
- 1Usually reliable sources have it that
- the poor soul in question is no one less than GUM hardcore hacker Hans-Wolfgang Loidl.
-
-
-
- The GHC Commentary - why we have ForeignPtr
-
-
-
-
-
On why we have ForeignPtr
-
-
Unfortunately it isn't possible to add a finalizer to a normal
- Ptr a. We already have a generic finalization mechanism:
- see the Weak module in package lang. But the only reliable way to
- use finalizers is to attach one to an atomic heap object - that
- way the compiler's optimiser can't interfere with the lifetime of
- the object.
-
-
The Ptr type is really just a boxed address - it's
- defined like
-
-
-data Ptr a = Ptr Addr#
-
-
-
where Addr# is an unboxed native address (just a 32-
- or 64- bit word). Putting a finalizer on a Ptr is
- dangerous, because the compiler's optimiser might remove the box
- altogether.
-
-
ForeignPtr is defined like this
-
-
-data ForeignPtr a = ForeignPtr ForeignObj#
-
-
-
where ForeignObj# is a *boxed* address, it corresponds
- to a real heap object. The heap object is primitive from the
- point of view of the compiler - it can't be optimised away. So it
- works to attach a finalizer to the ForeignObj# (but not
- to the ForeignPtr!).
-
-
There are several primitive objects to which we can attach
- finalizers: MVar#, MutVar#, ByteArray#,
- etc. We have special functions for some of these: eg.
- MVar.addMVarFinalizer.
-
-
So a nicer interface might be something like
-
-
-class Finalizable a where
- addFinalizer :: a -> IO () -> IO ()
-
-instance Finalizable (ForeignPtr a) where ...
-instance Finalizable (MVar a) where ...
-
-
-
So you might ask why we don't just get rid of Ptr and
- rename ForeignPtr to Ptr. The reason for that
- is just efficiency, I think.
-
-
The GHC Commentary - Supporting multi-threaded interoperation
-
-
-Authors: sof@galois.com, simonmar@microsoft.com
-Date: April 2002
-
-
-
-This document presents the implementation of an extension to
-Concurrent Haskell that provides two enhancements:
-
-
-
A Concurrent Haskell thread may call an external (e.g., C)
-function in a manner that's transparent to the execution/evaluation of
-other Haskell threads. Section Calling out" covers this.
-
-When a Concurrent Haskell(CH) thread calls a 'foreign import'ed
-function, the runtime system(RTS) has to handle this in a manner
-transparent to other CH threads. That is, they shouldn't be blocked
-from making progress while the CH thread executes the external
-call. Presently, all threads will block.
-
-
-Clearly, we have to rely on OS-level threads in order to support this
-kind of concurrency. The implementation described here defines the
-(abstract) OS threads interface that the RTS assumes. The implementation
-currently provides two instances of this interface, one for POSIX
-threads (pthreads) and one for the Win32 threads.
-
-
-
-
Multi-threading the RTS
-
-
-A simple and efficient way to implement non-blocking foreign calls is like this:
-
-
Invariant: only one OS thread is allowed to
-execute code inside of the GHC runtime system. [There are alternate
-designs, but I won't go into details on their pros and cons here.]
-We'll call the OS thread that is currently running Haskell threads
-the Current Haskell Worker Thread.
-
-The Current Haskell Worker Thread repeatedly grabs a Haskell thread, executes it until its
-time-slice expires or it blocks on an MVar, then grabs another, and executes
-that, and so on.
-
-
-
-When the Current Haskell Worker comes to execute a potentially blocking 'foreign
-import', it leaves the RTS and ceases being the Current Haskell Worker, but before doing so it makes certain that
-another OS worker thread is available to become the Current Haskell Worker.
-Consequently, even if the external call blocks, the new Current Haskell Worker
-continues execution of the other Concurrent Haskell threads.
-When the external call eventually completes, the Concurrent Haskell
-thread that made the call is passed the result and made runnable
-again.
-
-
-
-A pool of OS threads are constantly trying to become the Current Haskell Worker.
-Only one succeeds at any moment. If the pool becomes empty, the RTS creates more workers.
-
-The OS worker threads are regarded as interchangeable. A given Haskell thread
-may, during its lifetime, be executed entirely by one OS worker thread, or by more than one.
-There's just no way to tell.
-
-
If a foreign program wants to call a Haskell function, there is always a thread switch involved.
-The foreign program uses thread-safe mechanisms to create a Haskell thread and make it runnable; and
-the current Haskell Worker Thread exectutes it. See Section Calling in.
-
-
-The rest of this section describes the mechanics of implementing all
-this. There's two parts to it, one that describes how a native (OS) thread
-leaves the RTS to service the external call, the other how the same
-thread handles returning the result of the external call back to the
-Haskell thread.
-
-
-
-
Making the external call
-
-
-Presently, GHC handles 'safe' C calls by effectively emitting the
-following code sequence:
-
-After having squirreled away the state of a Haskell thread,
-Schedule.c:suspendThread() is called which puts the current
-thread on a list [Schedule.c:suspended_ccalling_threads]
-containing threads that are currently blocked waiting for external calls
-to complete (this is done for the purposes of finding roots when
-garbage collecting).
-
-
-
-In addition to putting the Haskell thread on
-suspended_ccalling_threads, suspendThread() now also
-does the following:
-
-
-
Instructs the Task Manager to make sure that there's a
-another native thread waiting in the wings to take over the execution
-of Haskell threads. This might entail creating a new
-worker thread or re-using one that's currently waiting for
-more work to do. The Task Manager section
-presents the functionality provided by this subsystem.
-
-
-
Releases its capability to execute within the RTS. By doing
-so, another worker thread will become unblocked and start executing
-code within the RTS. See the Capability
-section for details.
-
-
-
suspendThread() returns a token which is used to
-identify the Haskell thread that was added to
-suspended_ccalling_threads. This is done so that once the
-external call has completed, we know what Haskell thread to pull off
-the suspended_ccalling_threads list.
-
-
-
-
-Upon return from suspendThread(), the OS thread is free of
-its RTS executing responsibility, and can now invoke the external
-call. Meanwhile, the other worker thread that have now gained access
-to the RTS will continue executing Concurrent Haskell code. Concurrent
-'stuff' is happening!
-
-
-
-
Returning the external result
-
-
-When the native thread eventually returns from the external call,
-the result needs to be communicated back to the Haskell thread that
-issued the external call. The following steps takes care of this:
-
-
-
-
The returning OS thread calls Schedule.c:resumeThread(),
-passing along the token referring to the Haskell thread that made the
-call we're returning from.
-
-
-
-The OS thread then tries to grab hold of a returning worker
-capability, via Capability.c:grabReturnCapability().
-Until granted, the thread blocks waiting for RTS permissions. Clearly we
-don't want the thread to be blocked longer than it has to, so whenever
-a thread that is executing within the RTS enters the Scheduler (which
-is quite often, e.g., when a Haskell thread context switch is made),
-it checks to see whether it can give up its RTS capability to a
-returning worker, which is done by calling
-Capability.c:yieldToReturningWorker().
-
-
-
-If a returning worker is waiting (the code in Capability.c
-keeps a counter of the number of returning workers that are currently
-blocked waiting), it is woken up and the given the RTS execution
-priviledges/capabilities of the worker thread that gave up its.
-
-
-
-The thread that gave up its capability then tries to re-acquire
-the capability to execute RTS code; this is done by calling
-Capability.c:waitForWorkCapability().
-
-
-
-The returning worker that was woken up will continue execution in
-resumeThread(), removing its associated Haskell thread
-from the suspended_ccalling_threads list and start evaluating
-that thread, passing it the result of the external call.
-
-
-
-
-
RTS execution
-
-
-If a worker thread inside the RTS runs out of runnable Haskell
-threads, it goes to sleep waiting for the external calls to complete.
-It does this by calling waitForWorkCapability
-
-
-
-The availability of new runnable Haskell threads is signalled when:
-
-
-
-
When an external call is set up in suspendThread().
-
When a new Haskell thread is created (e.g., whenever
-Concurrent.forkIO is called from within Haskell); this is
-signalled in Schedule.c:scheduleThread_().
-
-
Whenever a Haskell thread is removed from a 'blocking queue'
-attached to an MVar (only?).
-
-
-
-
-
Calling in
-
-Providing robust support for having multiple OS threads calling into
-Haskell is not as involved as its dual.
-
-
-
The OS thread issues the call to a Haskell function by going via
-the Rts API (as specificed in RtsAPI.h).
-
Making the function application requires the construction of a
-closure on the heap. This is done in a thread-safe manner by having
-the OS thread lock a designated block of memory (the 'Rts API' block,
-which is part of the GC's root set) for the short period of time it
-takes to construct the application.
-
The OS thread then creates a new Haskell thread to execute the
-function application, which (eventually) boils down to calling
-Schedule.c:createThread()
-
-Evaluation is kicked off by calling Schedule.c:scheduleExtThread(),
-which asks the Task Manager to possibly create a new worker (OS)
-thread to execute the Haskell thread.
-
-After the OS thread has done this, it blocks waiting for the
-Haskell thread to complete the evaluation of the Haskell function.
-
-The reason why a separate worker thread is made to evaluate the Haskell
-function and not the OS thread that made the call-in via the
-Rts API, is that we want that OS thread to return as soon as possible.
-We wouldn't be able to guarantee that if the OS thread entered the
-RTS to (initially) just execute its function application, as the
-Scheduler may side-track it and also ask it to evaluate other Haskell threads.
-
-
-
-
-Note: As of 20020413, the implementation of the RTS API
-only serializes access to the allocator between multiple OS threads wanting
-to call into Haskell (via the RTS API.) It does not coordinate this access
-to the allocator with that of the OS worker thread that's currently executing
-within the RTS. This weakness/bug is scheduled to be tackled as part of an
-overhaul/reworking of the RTS API itself.
-
-
-
-
Subsystems introduced/modified
-
-
-These threads extensions affect the Scheduler portions of the runtime
-system. To make it more manageable to work with, the changes
-introduced a couple of new RTS 'sub-systems'. This section presents
-the functionality and API of these sub-systems.
-
-
-
-
Capabilities
-
-
-A Capability represent the token required to execute STG code,
-and all the state an OS thread/task needs to run Haskell code:
-its STG registers, a pointer to its TSO, a nursery etc. During
-STG execution, a pointer to the capabilitity is kept in a
-register (BaseReg).
-
-
-Only in an SMP build will there be multiple capabilities, for
-the threaded RTS and other non-threaded builds, there is only
-one global capability, namely MainCapability.
-
-
grabReturnCapability() is called by worker threads
-returning from an external call. It blocks them waiting to gain
-permissions to do so.
-
-
waitForWorkCapability() is called by worker threads
-already inside the RTS, but without any work to do. It blocks them
-waiting for there to new work to become available.
-
-
releaseCapability() hands back a capability. If a
-'returning worker' is waiting, it is signalled that a capability
-has become available. If not, releaseCapability() tries
-to signal worker threads that are blocked waiting inside
-waitForWorkCapability() that new work might now be
-available.
-
-
yieldToReturningWorker() is called by the worker thread
-that's currently inside the Scheduler. It checks whether there are other
-worker threads waiting to return from making an external call. If so,
-they're given preference and a capability is transferred between worker
-threads. One of the waiting 'returning worker' threads is signalled and made
-runnable, with the other, yielding, worker blocking to re-acquire
-a capability.
-
-
-
-The condition variables used to implement the synchronisation between
-worker consumers and providers are local to the Capability
-implementation. See source for details and comments.
-
-
-
-
The Task Manager
-
-
-The Task Manager API is responsible for managing the creation of
-OS worker RTS threads. When a Haskell thread wants to make an
-external call, the Task Manager is asked to possibly create a
-new worker thread to take over the RTS-executing capability of
-the worker thread that's exiting the RTS to execute the external call.
-
-
-The Capability subsystem keeps track of idle worker threads, so
-making an informed decision about whether or not to create a new OS
-worker thread is easy work for the task manager. The Task manager
-provides the following API:
-
startTaskManager() and stopTaskManager() starts
-up and shuts down the subsystem. When starting up, you have the option
-to limit the overall number of worker threads that can be
-created. An unbounded (modulo OS thread constraints) number of threads
-is created if you pass '0'.
-
startTask() is called when a worker thread calls
-suspendThread() to service an external call, asking another
-worker thread to take over its RTS-executing capability. It is also
-called when an external OS thread invokes a Haskell function via the
-Rts API.
-
-
-
-
Native threads API
-
-To hide OS details, the following API is used by the task manager and
-the scheduler to interact with an OS' threads API:
-
-
-
-To signal that you want an external call to be serviced by a separate
-OS thread, you have to add the attribute threadsafe to
-a foreign import declaration, i.e.,
-
-
-The distinction between 'safe' and thread-safe C calls is made
-so that we may call external functions that aren't re-entrant but may
-cause a GC to occur.
-
-The threadsafe attribute subsumes safe.
-
-
-
-
Building the GHC RTS
-
-The multi-threaded extension isn't currently enabled by default. To
-have it built, you need to run the fptools configure script
-with the extra option --enable-threaded-rts turned on, and
-then proceed to build the compiler as per normal.
-
-
-
- Last modified: Wed Apr 10 14:21:57 Pacific Daylight Time 2002
-
-
-
diff --git a/docs/comm/rts-libs/non-blocking.html b/docs/comm/rts-libs/non-blocking.html
deleted file mode 100644
index 627bde8d8887..000000000000
--- a/docs/comm/rts-libs/non-blocking.html
+++ /dev/null
@@ -1,133 +0,0 @@
-
-
-
-
- The GHC Commentary - Non-blocking I/O on Win32
-
-
-
-
The GHC Commentary - Non-blocking I/O on Win32
-
-
-This note discusses the implementation of non-blocking I/O on
-Win32 platforms. It is not implemented yet (Apr 2002), but it seems worth
-capturing the ideas. Thanks to Sigbjorn for writing them.
-
-
Background
-
-GHC has provided non-blocking I/O support for Concurrent Haskell
-threads on platforms that provide 'UNIX-style' non-blocking I/O for
-quite a while. That is, platforms that let you alter the property of a
-file descriptor so that, instead of having a thread block performing an I/O
-operation that cannot be immediately satisfied, the operation returns
-back a special error code (EWOULDBLOCK.) When that happens, the CH
-thread that made the blocking I/O request is put into a blocked-on-IO
-state (see Foreign.C.Error.throwErrnoIfRetryMayBlock). The RTS will
-in a timely fashion check to see whether I/O is again possible
-(via a call to select()), and if it is, unblock the thread & have it
-re-try the I/O operation. The result is that other Concurrent Haskell
-threads won't be affected, but can continue operating while a thread
-is blocked on I/O.
-
-Non-blocking I/O hasn't been supported by GHC on Win32 platforms, for
-the simple reason that it doesn't provide the OS facilities described
-above.
-
-
Win32 non-blocking I/O, attempt 1
-
-Win32 does provide something select()-like, namely the
-WaitForMultipleObjects() API. It takes an array of kernel object
-handles plus a timeout interval, and waits for either one (or all) of
-them to become 'signalled'. A handle representing an open file (for
-reading) becomes signalled once there is input available.
-
-So, it is possible to observe that I/O is possible using this
-function, but not whether there's "enough" to satisfy the I/O request.
-So, if we were to mimic select() usage with WaitForMultipleObjects(),
-we'd correctly avoid blocking initially, but a thread may very well
-block waiting for its I/O request to be satisfied once the file
-handle has become signalled. [There is a fix for this -- only read
-and write one byte at a time -- but I'm not advocating that.]
-
-
-
Win32 non-blocking I/O, attempt 2
-
-Asynchronous I/O on Win32 is supported via 'overlapped I/O'; that is,
-asynchronous read and write requests can be made via the ReadFile() /
-WriteFile () APIs, specifying position and length of the operation.
-If the I/O requests cannot be handled right away, the APIs won't
-block, but return immediately (and report ERROR_IO_PENDING as their
-status code.)
-
-The completion of the request can be reported in a number of ways:
-
-
synchronously, by blocking inside Read/WriteFile(). (this is the
- non-overlapped case, really.)
-
-
-
as part of the overlapped I/O request, pass a HANDLE to an event
- object. The I/O system will signal this event once the request
- completed, which a waiting thread will then be able to see.
-
-
-
by supplying a pointer to a completion routine, which will be
- called as an Asynchronous Procedure Call (APC) whenever a thread
- calls a select bunch of 'alertable' APIs.
-
-
-
by associating the file handle with an I/O completion port. Once
- the request completes, the thread servicing the I/O completion
- port will be notified.
-
-The use of I/O completion port looks the most interesting to GHC,
-as it provides a central point where all I/O requests are reported.
-
-Note: asynchronous I/O is only fully supported by OSes based on
-the NT codebase, i.e., Win9x doesn't permit async I/O on files and
-pipes. However, Win9x does support async socket operations, and
-I'm currently guessing here, console I/O. In my view, it would
-be acceptable to provide non-blocking I/O support for NT-based
-OSes only.
-
-Here's the design I currently have in mind:
-
-
Upon startup, an RTS helper thread whose only purpose is to service
- an I/O completion port, is created.
-
-
All files are opened in 'overlapping' mode, and associated
- with an I/O completion port.
-
-
Overlapped I/O requests are used to implement read() and write().
-
-
If the request cannot be satisfied without blocking, the Haskell
- thread is put on the blocked-on-I/O thread list & a re-schedule
- is made.
-
-
When the completion of a request is signalled via the I/O completion
- port, the RTS helper thread will move the associated Haskell thread
- from the blocked list onto the runnable list. (Clearly, care
- is required here to have another OS thread mutate internal Scheduler
- data structures.)
-
-
-
In the event all Concurrent Haskell threads are blocked waiting on
- I/O, the main RTS thread blocks waiting on an event synchronisation
- object, which the helper thread will signal whenever it makes
- a Haskell thread runnable.
-
-
-
-I might do the communication between the RTS helper thread and the
-main RTS thread differently though: rather than have the RTS helper
-thread manipulate thread queues itself, thus requiring careful
-locking, just have it change a bit on the relevant TSO, which the main
-RTS thread can check at regular intervals (in some analog of
-awaitEvent(), for example).
-
-
- The standard Haskell Prelude as well as GHC's Prelude extensions are
- constructed from GHC's primitives in a
- couple of layers.
-
-
PrelBase.lhs
-
 Some of the most elementary Prelude definitions are collected in PrelBase.lhs.
- In particular, it defines the boxed versions of Haskell primitive types
- - for example, Int is defined as
-
-data Int = I# Int#
-
-
- Saying that a boxed integer Int is formed by applying the
- data constructor I# to an unboxed integer of type
- Int#. Unboxed types are hardcoded in the compiler and
- exported together with the primitive
- operations understood by GHC.
-
 GHC uses many optimisations and GHC-specific techniques (unboxed
- values, RULES pragmas, and so on) to make the heavily used Prelude code
- as fast as possible.
-
-
-
Par, seq, and lazy
-
 In GHC.Conc you will find
-
- pseq a b = a `seq` lazy b
-
 What's this "lazy" thing?  Well, pseq is a seq for a parallel setting.
- We really mean "evaluate a, then b". But if the strictness analyser sees that pseq is strict
- in b, then b might be evaluated before a, which is all wrong.
-
-Solution: wrap the 'b' in a call to GHC.Base.lazy. This function is just the identity function,
-except that it's put into the built-in environment in MkId.lhs. That is, the MkId.lhs defn over-rides the
-inlining and strictness information that comes in from GHC.Base.hi. And that makes lazy look
-lazy, and have no inlining. So the strictness analyser gets no traction.
-
-In the worker/wrapper phase, after strictness analysis, lazy is "manually" inlined (see WorkWrap.lhs),
-so we get all the efficiency back.
-
-This supersedes an earlier scheme involving an even grosser hack in which par# and seq# returned an
-Int#. Now there is no seq# operator at all.
-
-
-
-
fold/build
-
- There is a lot of magic in PrelBase.lhs -
- among other things, the RULES
- pragmas implementing the fold/build
- optimisation. The code for map is
- a good example for how it all works. In the prelude code for version
- 5.03 it reads as follows:
-
-map :: (a -> b) -> [a] -> [b]
-map _ [] = []
-map f (x:xs) = f x : map f xs
-
--- Note eta expanded
-mapFB :: (elt -> lst -> lst) -> (a -> elt) -> a -> lst -> lst
-{-# INLINE [0] mapFB #-}
-mapFB c f x ys = c (f x) ys
-
-{-# RULES
-"map" [~1] forall f xs. map f xs = build (\c n -> foldr (mapFB c f) n xs)
-"mapList" [1] forall f. foldr (mapFB (:) f) [] = map f
-"mapFB" forall c f g. mapFB (mapFB c f) g = mapFB c (f.g)
- #-}
-
-
- Up to (but not including) phase 1, we use the "map" rule to
- rewrite all saturated applications of map with its
- build/fold form, hoping for fusion to happen. In phase 1 and 0, we
- switch off that rule, inline build, and switch on the
- "mapList" rule, which rewrites the foldr/mapFB thing back
- into plain map.
-
- It's important that these two rules aren't both active at once
- (along with build's unfolding) else we'd get an infinite loop
- in the rules. Hence the activation control using explicit phase numbers.
-
- The "mapFB" rule optimises compositions of map.
-
- The mechanism as described above is new in 5.03 since January 2002,
- where the [~N] syntax for phase number
- annotations at rules was introduced. Before that the whole arrangement
- was more complicated, as the corresponding prelude code for version
- 4.08.1 shows:
-
-map :: (a -> b) -> [a] -> [b]
-map = mapList
-
--- Note eta expanded
-mapFB :: (elt -> lst -> lst) -> (a -> elt) -> a -> lst -> lst
-mapFB c f x ys = c (f x) ys
-
-mapList :: (a -> b) -> [a] -> [b]
-mapList _ [] = []
-mapList f (x:xs) = f x : mapList f xs
-
-{-# RULES
-"map" forall f xs. map f xs = build (\c n -> foldr (mapFB c f) n xs)
-"mapFB" forall c f g. mapFB (mapFB c f) g = mapFB c (f.g)
-"mapList" forall f. foldr (mapFB (:) f) [] = mapList f
- #-}
-
-
- This code is structured as it is, because the "map" rule first
- breaks the map open, which exposes it to the various
- foldr/build rules, and if no foldr/build rule matches, the "mapList"
- rule closes it again in a later phase of optimisation - after
- build was inlined. As a consequence, the whole thing depends a bit on
- the timing of the various optimisations (the map might be closed again
- before any of the foldr/build rules fires). To make the timing
- deterministic, build gets a {-# INLINE 2 build
- #-} pragma, which delays build's inlining, and thus,
- the closing of the map. [NB: Phase numbering was forward at that time.]
-
-
- Most user-level Haskell types and functions provided by GHC (in
- particular those from the Prelude and GHC's Prelude extensions) are
- internally constructed from even more elementary types and functions.
- Most notably, GHC understands a notion of unboxed types, which
- are the Haskell representation of primitive bit-level integer, float,
- etc. types (as opposed to their boxed, heap allocated counterparts) -
- cf. "Unboxed
- Values as First Class Citizens."
-
-
The Ultimate Source of Primitives
-
- The hardwired types of GHC are brought into scope by the module
 PrelGHC. This module only exists in the form of a
- handwritten interface file PrelGHC.hi-boot,
- which lists the type and function names, as well as instance
 declarations. The actual types of these names as well as their
 implementation are hardwired into GHC. Note that the names in this file
- are z-encoded, and in particular, identifiers ending on zh
- denote user-level identifiers ending in a hash mark (#),
- which is used to flag unboxed values or functions operating on unboxed
- values. For example, we have Char#, ord#, and
- so on.
-
-
The New Primitive Definition Scheme
-
- As of (about) the development version 4.11, the types and various
- properties of primitive operations are defined in the file primops.txt.pp.
- (Personally, I don't think that the .txt suffix is really
- appropriate, as the file is used for automatic code generation; the
- recent addition of .pp means that the file is now mangled
- by cpp.)
-
- The utility genprimopcode
- generates a series of Haskell files from primops.txt, which
- encode the types and various properties of the primitive operations as
- compiler internal data structures. These Haskell files are not complete
- modules, but program fragments, which are included into compiler modules
- during the GHC build process. The generated include files can be found
- in the directory fptools/ghc/compiler/ and carry names
 matching the pattern primop-*.hs-incl. They are generated
- during the execution of the boot target in the
- fptools/ghc/ directory. This scheme significantly
- simplifies the maintenance of primitive operations.
-
- As of development version 5.02, the primops.txt file also allows the
- recording of documentation about intended semantics of the primitives. This can
- be extracted into a latex document (or rather, into latex document fragments)
- via an appropriate switch to genprimopcode. In particular, see primops.txt
- for full details of how GHC is configured to cope with different machine word sizes.
-
-
- The GHC Commentary - Spineless Tagless C
-
-
-
-
The GHC Commentary - Spineless Tagless C
-
 The C code generated by GHC doesn't use higher-level features of C, so as to be
 able to control as precisely as possible what code is generated.
- Moreover, it uses special features of gcc (such as, first class labels)
- to produce more efficient code.
-
- STG C makes ample use of C's macro language to define idioms, which also
- reduces the size of the generated C code (thus, reducing I/O times).
- These macros are defined in the C headers located in GHC's includes
- directory.
-
-
TailCalls.h
-
- TailCalls.h
- defines how tail calls are implemented - and in particular - optimised
- in GHC generated code. The default case, for an architecture for which
- GHC is not optimised, is to use the mini interpreter described in the STG paper.
-
- For supported architectures, various tricks are used to generate
- assembler implementing proper tail calls. On i386, gcc's first class
- labels are used to directly jump to a function pointer. Furthermore,
- markers of the form --- BEGIN --- and --- END
- --- are added to the assembly right after the function prologue
- and before the epilogue. These markers are used by the Evil Mangler.
-
-
-
- The GHC Commentary - The Multi-threaded runtime, and multiprocessor execution
-
-
-
-
The GHC Commentary - The Multi-threaded runtime, and multiprocessor execution
-
-
This section of the commentary explains the structure of the runtime system
- when used in threaded or SMP mode.
-
-
The threaded version of the runtime supports
- bound threads and non-blocking foreign calls, and an overview of its
- design can be found in the paper Extending
- the Haskell Foreign Function Interface with Concurrency. To
- compile the runtime with threaded support, add the line
-
-
GhcRTSWays += thr
-
- to mk/build.mk. When building C code in the runtime for the threaded way,
- the symbol THREADED_RTS is defined (this is arranged by the
- build system when building for way thr, see
- mk/config.mk). To build a Haskell program
- with the threaded runtime, pass the flag -threaded to GHC (this
- can be used in conjunction with -prof, and possibly
- -debug and others depending on which versions of the RTS have
 been built).
-
-
The SMP version of the runtime supports the same facilities as the
- threaded version, and in addition supports execution of Haskell code by
- multiple simultaneous OS threads. For SMP support, both the runtime and
- the libraries must be built a special way: add the lines
-
-
-GhcRTSWays += thr
-GhcLibWays += s
-
- to mk/build.mk. To build Haskell code for
- SMP execution, use the flag -smp to GHC (this can be used in
- conjunction with -debug, but no other way-flags at this time).
- When building C code in the runtime for SMP
- support, the symbol SMP is defined (this is arranged by the
- compiler when the -smp flag is given, see
- ghc/compiler/main/StaticFlags.hs).
-
-
When building the runtime in either the threaded or SMP ways, the symbol
- RTS_SUPPORTS_THREADS will be defined (see Rts.h).
-
-
Overall design
-
-
The system is based around the notion of a Capability. A
- Capability is an object that represents both the permission to
- execute some Haskell code, and the state required to do so. In order
- to execute some Haskell code, a thread must therefore hold a
- Capability. The available pool of capabilities is managed by
- the Capability API, described below.
-
-
In the threaded runtime, there is only a single Capability in the
- system, indicating that only a single thread can be executing Haskell
- code at any one time. In the SMP runtime, there can be an arbitrary
- number of capabilities selectable at runtime with the +RTS -Nn
- flag; in practice the number is best chosen to be the same as the number of
- processors on the host machine.
-
-
There are a number of OS threads running code in the runtime. We call
- these tasks to avoid confusion with Haskell threads.
- Tasks are managed by the Task subsystem, which is mainly
- concerned with keeping track of statistics such as how much time each
- task spends executing Haskell code, and also keeping track of how many
- tasks are around when we want to shut down the runtime.
-
-
Some tasks are created by the runtime itself, and some may be here
- as a result of a call to Haskell from foreign code (we
- call this an in-call). The
- runtime can support any number of concurrent foreign in-calls, but the
- number of these calls that will actually run Haskell code in parallel is
- determined by the number of available capabilities. Each in-call creates
- a bound thread, as described in the FFI/Concurrency paper (cited
- above).
-
-
In the future we may want to bind a Capability to a particular
- processor, so that we can support a notion of affinity - avoiding
- accidental migration of work from one CPU to another, so that we can make
- best use of a CPU's local cache. For now, the design ignores this
- issue.
-
-
The OSThreads interface
-
-
This interface is merely an abstraction layer over the OS-specific APIs
- for managing threads. It has two main implementations: Win32 and
- POSIX.
- GHC implements experimental (by now it is actually quite well tested)
- support for access to foreign functions and generally the interaction
- between Haskell code and code written in other languages. Code
- generation in this context can get quite tricky. This section attempts
- to cast some light on this aspect of the compiler.
-
-
FFI Stub Files
-
- For each Haskell module that contains a foreign export
- dynamic declaration, GHC generates a _stub.c file
- that needs to be linked with any program that imports the Haskell
- module. When asked about it Simon Marlow justified the
- existence of these files as follows:
-
- The stub files contain the helper function which invokes the Haskell
- code when called from C.
-
- Each time the foreign export dynamic is invoked to create a new
- callback function, a small piece of code has to be dynamically
- generated (by code in Adjustor.c). It is the address of this dynamically generated bit of
- code that is returned as the Addr (or Ptr).
- When called from C, the dynamically generated code must somehow invoke
- the Haskell function which was originally passed to the
- f.e.d. function -- it does this by invoking the helper function,
- passing it a StablePtr
- to the Haskell function. It's split this way for two reasons: the
- same helper function can be used each time the f.e.d. function is
- called, and to keep the amount of dynamically generated code to a
- minimum.
-
-
- The stub code is generated by DSForeign.fexportEntry.
-
-
-
- The directory fptools/ghc/compiler/basicTypes/
 contains modules that define some of the essential type definitions for
- the compiler - such as, identifiers, variables, modules, and unique
- names. Some of those are discussed in the following. See elsewhere for more
- detailed information on:
-
 An Id (defined in Id.lhs)
- essentially records information about value and data constructor
- identifiers -- to be precise, in the case of data constructors, two
- Ids are used to represent the worker and wrapper functions
- for the data constructor, respectively. The information maintained in
- the Id abstraction includes among other items strictness,
- occurrence, specialisation, and unfolding information.
-
- Due to the way Ids are used for data constructors,
- all Ids are represented as variables, which contain a
- varInfo field of abstract type IdInfo.IdInfo.
- This is where the information about Ids is really stored.
- The following is a (currently, partial) list of the various items in an
- IdInfo:
-
The OccInfo data type is defined in the module BasicTypes.lhs.
- Apart from the trivial NoOccInfo, it distinguishes
- between variables that do not occur at all (IAmDead),
 occur just once (OneOcc), or are loop breakers
- (IAmALoopBreaker).
-
-
-
Sets, Finite Maps, and Environments
-
- Sets of variables, or more generally names, which are needed throughout
- the compiler, are provided by the modules VarSet.lhs
- and NameSet.lhs,
- respectively. Moreover, frequently maps from variables (or names) to
- other data is needed. For example, a substitution is represented by a
- finite map from variable names to expressions. Jobs like this are
- solved by means of variable and name environments implemented by the
- modules VarEnv.lhs
- and NameEnv.lhs.
-
-
The Module VarSet
-
- The Module VarSet provides the types VarSet,
- IdSet, and TyVarSet, which are synonyms in the
- current implementation, as Var, Id, and
- TyVar are synonyms. The module provides all the operations
- that one would expect including the creating of sets from individual
- variables and lists of variables, union and intersection operations,
- element checks, deletion, filter, fold, and map functions.
-
- The implementation is based on UniqSets,
- which in turn are simply UniqFMs
- (i.e., finite maps with uniques as keys) that map each unique to the
- variable that it represents.
-
-
The Module NameSet
-
- The Module NameSet provides the same functionality as
- VarSet only for Names.
- As for the difference between Names and Vars,
- a Var is built from a Name plus additional
 information (most importantly type information).
-
-
The Module VarEnv
-
- The module VarEnv provides the types VarEnv,
- IdEnv, and TyVarEnv, which are again
- synonyms. The provided base functionality is similar to
- VarSet with the main difference that a type VarEnv
- T associates a value of type T with each variable in
- the environment, thus effectively implementing a finite map from
- variables to values of type T.
-
- The implementation of VarEnv is also by UniqFM,
- which entails the slightly surprising implication that it is
- not possible to retrieve the domain of a variable environment.
- In other words, there is no function corresponding to
- VarSet.varSetElems :: VarSet -> [Var] in
- VarEnv. This is because the UniqFM used to
- implement VarEnv stores only the unique corresponding to a
- variable in the environment, but not the entire variable (and there is
- no mapping from uniques to variables).
-
- In addition to plain variable environments, the module also contains
- special substitution environments - the type SubstEnv -
- that associates variables with a special purpose type
- SubstResult.
-
-
The Module NameEnv
-
- The type NameEnv.NameEnv is like VarEnv only
- for Names.
-
-
This is a rough description of some of the coding practices and
- style that we use for Haskell code inside ghc/compiler.
-
-
The general rule is to stick to the same coding style as is
- already used in the file you're editing. If you must make
- stylistic changes, commit them separately from functional changes,
- so that someone looking back through the change logs can easily
- distinguish them.
-
-
To literate or not to literate?
-
-
In GHC we use a mixture of literate (.lhs) and
- non-literate (.hs) source. I (Simon M.) prefer to use
- non-literate style, because I think the
- \begin{code}..\end{code} clutter up the source too much,
- and I like to use Haddock-style comments (we haven't tried
- processing the whole of GHC with Haddock yet, though).
-
-
To CPP or not to CPP?
-
-
We pass all the compiler sources through CPP. The
- -cpp flag is always added by the build system.
-
-
The following CPP symbols are used throughout the compiler:
-
-
-
DEBUG
-
-
Used to enable extra checks and debugging output in the
- compiler. The ASSERT macro (see HsVersions.h)
- provides assertions which disappear when DEBUG is not
- defined.
-
-
All debugging output should be placed inside #ifdef
- DEBUG; we generally use this to provide warnings about
- strange cases and things that might warrant investigation. When
- DEBUG is off, the compiler should normally be silent
 unless something goes wrong (except when the verbosity level
- is greater than zero).
-
-
A good rule of thumb is that DEBUG shouldn't add
- more than about 10-20% to the compilation time. This is the case
- at the moment. If it gets too expensive, we won't use it. For
- more expensive runtime checks, consider adding a flag - see for
- example -dcore-lint.
-
-
-
GHCI
-
-
Enables GHCi support, including the byte code generator and
- interactive user interface. This isn't the default, because the
- compiler needs to be bootstrapped with itself in order for GHCi
- to work properly. The reason is that the byte-code compiler and
- linker are quite closely tied to the runtime system, so it is
- essential that GHCi is linked with the most up-to-date RTS.
- Another reason is that the representation of certain datatypes
- must be consistent between GHCi and its libraries, and if these
- were inconsistent then disaster could follow.
-
-
-
-
-
Platform tests
-
-
There are three platforms of interest to GHC:
-
-
-
The Build platform. This is the platform on which we
- are building GHC.
-
The Host platform. This is the platform on which we
- are going to run this GHC binary, and associated tools.
-
The Target platform. This is the platform for which
- this GHC binary will generate code.
-
-
-
At the moment, there is very limited support for having
 different values for build, host, and target. In particular:
-
-
-
The build platform is currently always the same as the host
- platform. The build process needs to use some of the tools in
- the source tree, for example ghc-pkg and
- hsc2hs.
-
-
If the target platform differs from the host platform, then
- this is generally for the purpose of building .hc files
- from Haskell source for porting GHC to the target platform.
- Full cross-compilation isn't supported (yet).
-
-
-
In the compiler's source code, you may make use of the
- following CPP symbols:
-
-
-
xxx_TARGET_ARCH
-
xxx_TARGET_VENDOR
-
xxx_TARGET_OS
-
xxx_HOST_ARCH
-
xxx_HOST_VENDOR
-
xxx_HOST_OS
-
-
-
where xxx is the appropriate value:
- eg. i386_TARGET_ARCH.
-
-
Compiler versions
-
-
GHC must be compilable by every major version of GHC from 5.02
- onwards, and itself. It isn't necessary for it to be compilable
- by every intermediate development version (that includes last
- week's CVS sources).
-
-
To maintain compatibility, use HsVersions.h (see
- below) where possible, and try to avoid using #ifdef in
- the source itself.
-
-
The source file
-
-
We now describe a typical source file, annotating stylistic
- choices as we go.
-
-
-{-# OPTIONS ... #-}
-
-
-
An OPTIONS pragma is optional, but if present it
- should go right at the top of the file. Things you might want to
- put in OPTIONS include:
-
-
-
-#include options to bring into scope prototypes
- for FFI declarations
-
-fvia-C if you know that
- this module won't compile with the native code generator.
-
-
-
Don't bother putting -cpp or -fglasgow-exts
- in the OPTIONS pragma; these are already added to the
- command line by the build system.
-
-
-
-module Foo (
- T(..),
- foo, -- :: T -> T
- ) where
-
-
-
We usually (99% of the time) include an export list. The only
- exceptions are perhaps where the export list would list absolutely
- everything in the module, and even then sometimes we do it anyway.
-
-
It's helpful to give type signatures inside comments in the
- export list, but hard to keep them consistent, so we don't always
- do that.
-
-
-#include "HsVersions.h"
-
-
-
HsVersions.h is a CPP header file containing a number
- of macros that help smooth out the differences between compiler
- versions. It defines, for example, macros for library module
- names which have moved between versions. Take a look.
-
-
Compiler imports, generally ordered from specific to generic
- (ie. modules from utils/ and basicTypes/
- usually come last)
-
-
Library imports
-
-
Standard Haskell 98 imports last
-
-
-
Import library modules from the base and
- haskell98 packages only. Use #defines in
- HsVersions.h when the modules names differ between
- versions of GHC (eg. DATA_IOREF in the example above).
- For code inside #ifdef GHCI, don't need to worry about GHC
- versioning (because we are bootstrapped).
-
-
We usually use import specs to give an explicit list of the
- entities imported from a module. The main reason for doing this is
- so that you can search the file for an entity and find which module
- it comes from. However, huge import lists can be a pain to
- maintain, so we often omit the import specs when they start to get
- long (actually I start omitting them when they don't fit on one
- line --Simon M.). Tip: use GHC's -fwarn-unused-imports
- flag so that you get notified when an import isn't being used any
- more.
-
-
If the module can be compiled multiple ways (eg. GHCI
- vs. non-GHCI), make sure the imports are properly #ifdefed
- too, so as to avoid spurious unused import warnings.
-
-
-
- The GHC Commentary - Data types and data constructors
-
-
-
-
The GHC Commentary - Data types and data constructors
-
-
-This chapter was thoroughly changed Feb 2003.
-
-
Data types
-
-Consider the following data type declaration:
-
-
- data T a = MkT !(a,a) !(T a) | Nil
-
- f x = case x of
- MkT p q -> MkT p (q+1)
- Nil -> Nil
-
-The user's source program mentions only the constructors MkT
-and Nil. However, these constructors actually do something
-in addition to building a data value. For a start, MkT evaluates
-its arguments. Secondly, with the flag -funbox-strict-fields GHC
-will flatten (or unbox) the strict fields. So we may imagine that there's the
-source constructor MkT and the representation constructor
-MkT, and things start to get pretty confusing.
-
-GHC now generates three unique Names for each data constructor:
-
- ---- OccName ------
- String Name space Used for
- ---------------------------------------------------------------------------
- The "source data con" MkT DataName The DataCon itself
- The "worker data con" MkT VarName Its worker Id
- aka "representation data con"
- The "wrapper data con" $WMkT VarName Its wrapper Id (optional)
-
-Recall that each occurrence name (OccName) is a pair of a string and a
-name space (see The truth about names), and
-two OccNames are considered the same only if both components match.
-That is what distinguishes the name of the DataCon from
-the name of its worker Id. To keep things unambiguous, in what
-follows we'll write "MkT{d}" for the source data con, and "MkT{v}" for
-the worker Id. (Indeed, when you dump stuff with "-ddumpXXX", if you
-also add "-dppr-debug" you'll get stuff like "Foo {- d rMv -}". The
-"d" part is the name space; the "rMv" is the unique key.)
-
-Each of these three names gets a distinct unique key in GHC's name cache.
-
-
The life cycle of a data type
-
-Suppose the Haskell source looks like this:
-
- data T a = MkT !(a,a) !Int | Nil
-
- f x = case x of
- Nil -> Nil
- MkT p q -> MkT p (q+1)
-
-When the parser reads it in, it decides which name space each lexeme comes
-from, thus:
-
- data T a = MkT{d} !(a,a) !Int | Nil{d}
-
- f x = case x of
- Nil{d} -> Nil{d}
- MkT{d} p q -> MkT{d} p (q+1)
-
-Notice that in the Haskell source all data constructors are named via the "source data con" MkT{d},
-whether in pattern matching or in expressions.
-
-In the translated source produced by the type checker (-ddump-tc), the program looks like this:
-
- f x = case x of
- Nil{d} -> Nil{v}
- MkT{d} p q -> $WMkT p (q+1)
-
-
-Notice that the type checker replaces the occurrence of MkT by the wrapper, but
-the occurrence of Nil by the worker. Reason: Nil doesn't have a wrapper because there is
-nothing to do in the wrapper (this is the vastly common case).
-
-Though they are not printed out by "-ddump-tc", behind the scenes, there are
-also the following: the data type declaration and the wrapper function for MkT.
-
- data T a = MkT{d} a a Int# | Nil{d}
-
- $WMkT :: (a,a) -> T a -> T a
- $WMkT p t = case p of
- (a,b) -> seq t (MkT{v} a b t)
-
-Here, the wrapper $WMkT evaluates and takes apart the argument p,
-evaluates the argument t, and builds a three-field data value
-with the worker constructor MkT{v}. (There are more notes below
-about the unboxing of strict fields.) The wrapper $WMkT is called an implicit binding,
-because it's introduced implicitly by the data type declaration (record selectors
-are also implicit bindings, for example). Implicit bindings are injected into the code
-just before emitting code or External Core.
-
-After desugaring into Core (-ddump-ds), the definition of f looks like this:
-
- f x = case x of
- Nil{d} -> Nil{v}
- MkT{d} a b r -> let { p = (a,b); q = I# r } in
- $WMkT p (q+1)
-
-Notice the way that pattern matching has been desugared to take account of the fact
-that the "real" data constructor MkT has three fields.
-
-By the time the simplifier has had a go at it, f will be transformed to:
-
- f x = case x of
- Nil{d} -> Nil{v}
- MkT{d} a b r -> MkT{v} a b (r +# 1#)
-
-Which is highly cool.
-
-
-
The constructor wrapper functions
-
-The wrapper functions are automatically generated by GHC, and are
-really emitted into the result code (albeit only after CorePrep; see
-CorePrep.mkImplicitBinds).
-The wrapper functions are inlined very
-vigorously, so you will not see many occurrences of the wrapper
-functions in an optimised program, but you may see some. For example,
-if your Haskell source has
-
- map MkT xs
-
-then $WMkT will not be inlined (because it is not applied to anything).
-That is why we generate real top-level bindings for the wrapper functions,
-and generate code for them.
-
-
-
The constructor worker functions
-
-Saturated applications of the constructor worker function MkT{v} are
-treated specially by the code generator; they really do allocation.
-However, we do want a single, shared, top-level definition for
-top-level nullary constructors (like True and False). Furthermore,
-what if the code generator encounters a non-saturated application of a
-worker? E.g. (map Just xs). We could declare that to be an
-error (CorePrep should saturate them). But instead we currently
-generate a top-level definition for each constructor worker, whether
-nullary or not. It takes the form:
-
- MkT{v} = \ p q r -> MkT{v} p q r
-
-This is a real hack. The occurrence on the RHS is saturated, so the code generator (both the
-one that generates abstract C and the byte-code generator) treats it as a special case and
-allocates a MkT; it does not make a recursive call! So now there's a top-level curried
-version of the worker which is available to anyone who wants it.
-
-This strange definition is not emitted into External Core. Indeed, you might argue that
-we should instead pass the list of TyCons to the code generator and have it
-generate magic bindings directly. As it stands, it's a real hack: see the code in
-CorePrep.mkImplicitBinds.
-
-
-
External Core
-
-When emitting External Core, we should see this for our running example:
-
-
- data T a = MkT a a Int# | Nil{d}
-
- $WMkT :: (a,a) -> T a -> T a
- $WMkT p t = case p of
- (a,b) -> seq t (MkT a b t)
-
- f x = case x of
- Nil -> Nil
- MkT a b r -> MkT a b (r +# 1#)
-
-Notice that it makes perfect sense as a program all by itself. Constructors
-look like constructors (albeit not identical to the original Haskell ones).
-
-When reading in External Core, the parser is careful to read it back in just
-as it was before it was spat out, namely:
-
- data T a = MkT{d} a a Int# | Nil{d}
-
- $WMkT :: (a,a) -> T a -> T a
- $WMkT p t = case p of
- (a,b) -> seq t (MkT{v} a b t)
-
- f x = case x of
- Nil{d} -> Nil{v}
- MkT{d} a b r -> MkT{v} a b (r +# 1#)
-
-
-
-
Unboxing strict fields
-
-If GHC unboxes strict fields (as in the first argument of MkT above),
-it also transforms
-source-language case expressions. Suppose you write this in your Haskell source:
-
- case e of
- MkT p t -> ..p..t..
-
-GHC will desugar this to the following Core code:
-
- case e of
- MkT a b t -> let p = (a,b) in ..p..t..
-
-The local let-binding reboxes the pair because it may be mentioned in
-the case alternative. This may well be a bad idea, which is why
--funbox-strict-fields is an experimental feature.
-
-It's essential that when importing a type T defined in some
-external module M, GHC knows what representation was used for
-that type, and that in turn depends on whether module M was
-compiled with -funbox-strict-fields. So when writing an
-interface file, GHC therefore records with each data type whether its
-strict fields (if any) should be unboxed.
-
-
Labels and info tables
-
-Quick rough notes: SLPJ March 2003.
-
-Every data constructor C has two info tables:
-
-
The static info table (label C_static_info), used for statically-allocated constructors.
-
-
The dynamic info table (label C_con_info), used for dynamically-allocated constructors.
-
-Statically-allocated constructors are not moved by the garbage collector, and therefore have a different closure
-type from dynamically-allocated constructors; hence they need
-a distinct info table.
-Both info tables share the same entry code, but since the entry code is physically juxtaposed with the
-info table, it must be duplicated (C_static_entry and C_con_entry respectively).
-
-
-
-
diff --git a/docs/comm/the-beast/desugar.html b/docs/comm/the-beast/desugar.html
deleted file mode 100644
index a66740259bff..000000000000
--- a/docs/comm/the-beast/desugar.html
+++ /dev/null
@@ -1,156 +0,0 @@
-
-
-
-
- The GHC Commentary - Sugar Free: From Haskell To Core
-
-
-
-
The GHC Commentary - Sugar Free: From Haskell To Core
-
- Up until after type checking, GHC keeps the source program in an
- abstract representation of Haskell source without removing any of the
- syntactic sugar (such as, list comprehensions) that could easily be
- represented by more primitive Haskell. This complicates part of the
- front-end considerably as the abstract syntax of Haskell (as exported by
- the module HsSyn)
- is much more complex than a simplified representation close to, say, the
- Haskell
- Kernel would be. However, having a representation that is as close
- as possible to the surface syntax simplifies the generation of clear
- error messages. As GHC (quite in contrast to "conventional" compilers)
- prints code fragments as part of error messages, the choice of
- representation is especially important.
-
- Nonetheless, as soon as the input has passed all static checks, it is
- transformed into GHC's principal intermediate language that goes by the
- name of Core and whose representation is exported by the
- module CoreSyn.
- All following compiler phases, except code generation operate on Core.
- Due to Andrew Tolmach's effort, there is also an external
- representation for Core.
-
- The conversion of the compiled module from HsSyn into that
- of CoreSyn is performed by a phase called the
- desugarer, which is located in
- fptools/ghc/compiler/deSugar/.
-    Its operation is detailed in the following.
-
-
-
Auxiliary Functions
-
-    The module DsMonad
- defines the desugarer monad (of type DsM) which maintains
- the environment needed for desugaring. In particular, it encapsulates a
- unique supply for generating new variables, a map to lookup standard
- names (such as functions from the prelude), a source location for error
- messages, and a pool to collect warning messages generated during
- desugaring. Initialisation of the environment happens in the function Desugar.desugar,
- which is also the main entry point into the desugarer.
-
- The generation of Core code often involves the use of standard functions
- for which proper identifiers (i.e., values of type Id that
- actually refer to the definition in the right Prelude) need to be
- obtained. This is supported by the function
- DsMonad.dsLookupGlobalValue :: Name -> DsM Id.
-
-
This module contains the main pattern-matching compiler in the form
- of a function called match. There is some documentation
- as to how match works contained in the module itself.
- Generally, the implemented algorithm is similar to the one described
- in Phil Wadler's Chapter ? of Simon Peyton Jones' The
- Implementation of Functional Programming Languages.
- Match exports a couple of functions with not really
- intuitive names. In particular, it exports match,
- matchWrapper, matchExport, and
- matchSimply. The function match, which is
- the main work horse, is only used by the other matching modules. The
-      function matchExport - despite its name - is merely used
- internally in Match and handles warning messages (see
- below for more details). The actual interface to the outside is
- matchWrapper, which converts the output of the type
- checker into the form needed by the pattern matching compiler (i.e., a
- list of EquationInfo). Similar in function to
- matchWrapper is matchSimply, which provides
- an interface for the case where a single expression is to be matched
- against a single pattern (as, for example, is the case in bindings in
- a do expression).
-
This module generates code for a set of alternative constructor
- patterns that belong to a single type by means of the routine
- matchConFamily. More precisely, the routine gets a set
- of equations where the left-most pattern of each equation is a
- constructor pattern with a head symbol from the same type as that of
- all the other equations. A Core case expression is generated that
-      distinguishes between all these constructors.  The routine is clever
- enough to generate a sparse case expression and to add a catch-all
- default case only when needed (i.e., if the case expression isn't
- exhaustive already). There is also an explanation at the start of the
-      module.
-
Generates code for a set of alternative literal patterns by means of
- the routine matchLiterals. The principle is similar to
- that of matchConFamily, but all left-most patterns are
- literals of the same type.
-
This module provides a set of auxiliary definitions as well as the
- data types EquationInfo and MatchResult that
- form the input and output, respectively, of the pattern matching
- compiler.
-
This module does not really contribute to compiling pattern
- matching, but it inspects sets of equations to find whether there are
- any overlapping patterns or non-exhaustive pattern sets. This task is
- implemented by the function check, which returns a list of
- patterns that are part of a non-exhaustive case distinction as well as a
- set of equation labels that can be reached during execution of the code;
- thus, the remaining equations are shadowed due to overlapping patterns.
- The function check is invoked and its result converted into
- suitable warning messages by the function Match.matchExport
- (which is a wrapper for Match.match).
-
-
- The central function match, given a set of equations,
- proceeds in a number of steps:
-
-
It starts by desugaring the left-most pattern of each equation using
- the function tidy1 (indirectly via
- tidyEqnInfo). During this process, non-elementary
-      patterns (e.g., those using explicit list syntax [x, y, ...,
-      z]) are converted to a standard constructor pattern and also
-      irrefutable patterns are removed.
-
Then, a process called unmixing clusters the equations into
- blocks (without re-ordering them), such that the left-most pattern of
- all equations in a block are either all variables, all literals, or
- all constructors.
-
Each block is, then, compiled by matchUnmixedEqns,
- which forwards the handling of literal pattern blocks to
- MatchLit.matchLiterals, of constructor pattern blocks to
- MatchCon.matchConFamily, and hands variable pattern
- blocks back to match.
-
-
-
-
-Last modified: Mon Feb 11 22:35:25 EST 2002
-
-
-
-
diff --git a/docs/comm/the-beast/driver.html b/docs/comm/the-beast/driver.html
deleted file mode 100644
index fbf65e33e79e..000000000000
--- a/docs/comm/the-beast/driver.html
+++ /dev/null
@@ -1,179 +0,0 @@
-
-
-
-
- The GHC Commentary - The Glorious Driver
-
-
-
-
The GHC Commentary - The Glorious Driver
-
- The Glorious Driver (GD) is the part of GHC that orchestrates the
- interaction of all the other pieces that make up GHC. It supersedes the
- Evil Driver (ED), which was a Perl script that served the same
- purpose and was in use until version 4.08.1 of GHC. Simon Marlow
- eventually slayed the ED and instated the GD. The GD is usually called
- the Compilation Manager these days.
-
-
- The GD has been substantially extended for GHCi, i.e., the interactive
- variant of GHC that integrates the compiler with a (meta-circular)
- interpreter since version 5.00. Most of the driver is located in the
- directory
- fptools/ghc/compiler/main/.
-
-
-
Command Line Options
-
- GHC's many flavours of command line options make the code interpreting
- them rather involved. The following provides a brief overview of the
- processing of these options. Since the addition of the interactive
- front-end to GHC, there are two kinds of options: static
- options and dynamic options. The former can only be set
- when the system is invoked, whereas the latter can be altered in the
- course of an interactive session. A brief explanation on the difference
- between these options and related matters is at the start of the module
- CmdLineOpts.
- The same module defines the enumeration DynFlag, which
- contains all dynamic flags. Moreover, there is the labelled record
- DynFlags that collects all the flag-related information
- that is passed by the compilation manager to the compiler proper,
- hsc, whenever a compilation is triggered. If you like to
- find out whether an option is static, use the predicate
- isStaticHscFlag in the same module.
-
- The second module that contains a lot of code related to the management
- of flags is DriverFlags.hs.
- In particular, the module contains two association lists that map the
- textual representation of the various flags to a data structure that
- tells the driver how to parse the flag (e.g., whether it has any
- arguments) and provides its internal representation. All static flags
- are contained in static_flags. A whole range of
- -f flags can be negated by adding a -f-no-
- prefix. These flags are contained in the association list
- fFlags.
-
- The driver uses a nasty hack based on IORefs that permits
- the rest of the compiler to access static flags as CAFs; i.e., there is
- a family of toplevel variable definitions in
- CmdLineOpts,
- below the literate section heading Static options, each of which
- contains the value of one static option. This is essentially realised
- via global variables (in the sense of C-style, updatable, global
- variables) defined via an evil pre-processor macro named
- GLOBAL_VAR, which is defined in a particularly ugly corner
- of GHC, namely the C header file
- HsVersions.h.
-
-
What Happens When
-
- Inside the Haskell compiler proper (hsc), a whole series of
- stages (``passes'') are executed in order to transform your Haskell program
- into C or native code. This process is orchestrated by
- main/HscMain.hscMain and its relative
- hscReComp. The latter directly invokes, in order,
- the parser, the renamer, the typechecker, the desugarer, the
- simplifier (Core2Core), the CoreTidy pass, the CorePrep pass,
- conversion to STG (CoreToStg), the interface generator
- (MkFinalIface), the code generator, and code output. The
- simplifier is the most complex of these, and is made up of many
- sub-passes. These are controlled by buildCoreToDo,
- as described below.
-
-
Scheduling Optimisations Phases
-
- GHC has a large variety of optimisations at its disposal, many of which
- have subtle interdependencies. The overall plan for program
- optimisation is fixed in DriverState.hs.
- First of all, there is the variable hsc_minusNoO_flags that
- determines the -f options that you get without
- -O (aka optimisation level 0) as well as
- hsc_minusO_flags and hsc_minusO2_flags for
- -O and -O2.
-
- However, most of the strategic decisions about optimisations on the
- intermediate language Core are encoded in the value produced by
- buildCoreToDo, which is a list with elements of type
- CoreToDo. Each element of this list specifies one step in
- the sequence of core optimisations executed by the Mighty Simplifier. The type
- CoreToDo is defined in CmdLineOpts.lhs.
- The actual execution of the optimisation plan produced by
- buildCoreToDo is performed by SimpleCore.doCorePasses.
- Core optimisation plans consist of a number of simplification phases
- (currently, three for optimisation levels of 1 or higher) with
- decreasing phase numbers (the lowest, corresponding to the last phase,
- namely 0). Before and after these phases, optimisations such as
- specialisation, let floating, worker/wrapper, and so on are executed.
- The sequence of phases is such that the synergistic effect of the phases
- is maximised -- however, this is a fairly fragile arrangement.
-
- There is a similar construction for optimisations on STG level stored in
- the variable buildStgToDo :: [StgToDo]. However, this is a
- lot less complex than the arrangement for Core optimisations.
-
-
Linking the RTS and libHSstd
-
- Since the RTS and HSstd refer to each other, there is a Cunning
- Hack to avoid putting them each on the command-line twice or
- thrice (aside: try asking for `plaice and chips thrice' in a
- fish and chip shop; bet you only get two lots). The hack involves
- adding
- the symbols that the RTS needs from libHSstd, such as
- PrelWeak_runFinalizzerBatch_closure and
- __stginit_Prelude, to the link line with the
- -u flag. The standard library appears before the
- RTS on the link line, and these options cause the corresponding
-    symbols to be picked up even though the linker might not have seen them
- being used as the RTS appears later on the link line. As a result,
- when the RTS is also scanned, these symbols are already resolved. This
- avoids the linker having to read the standard library and RTS
- multiple times.
-
-
-    This does, however, lead to a complication.  Normal Haskell
- programs do not have a main() function, so this is
- supplied by the RTS (in the file
- Main.c).
- It calls startupHaskell, which
- itself calls __stginit_PrelMain, which is therefore,
- since it occurs in the standard library, one of the symbols
- passed to the linker using the -u option. This is fine
- for standalone Haskell programs, but as soon as the Haskell code is only
- used as part of a program implemented in a foreign language, the
- main() function of that foreign language should be used
- instead of that of the Haskell runtime. In this case, the previously
- described arrangement unfortunately fails as
- __stginit_PrelMain had better not be linked in,
- because it tries to call __stginit_Main, which won't
- exist. In other words, the RTS's main() refers to
- __stginit_PrelMain which in turn refers to
- __stginit_Main. Although the RTS's main()
- might not be linked in if the program provides its own, the driver
- will normally force __stginit_PrelMain to be linked in anyway,
- using -u, because it's a back-reference from the
- RTS to HSstd. This case is coped with by the -no-hs-main
-    flag, which suppresses passing the corresponding -u option
- to the linker -- although in some versions of the compiler (e.g., 5.00.2)
- it didn't work. In addition, the driver generally places the C program
- providing the main() that we want to use before the RTS
- on the link line. Therefore, the RTS's main is never used and
- without the -u the label __stginit_PrelMain
- will not be linked.
-
- This does the obvious thing: builds in the heap the expression
- (foo a1 a2), calls rts_evalIO to run it,
- and uses rts_getInt to fish out the result.
-
-
- (2) static export of a non-IO-typed function from some module MMM
-
- foreign export foo :: Int -> Int -> Int
-
- This is identical to case (1), with the sole difference that the
- stub calls rts_eval rather than
- rts_evalIO.
-
-
- (3) dynamic export of an IO-typed function from some module MMM
-
- foreign export mkCallback :: (Int -> Int -> IO Int) -> IO (FunPtr a)
-
- Dynamic exports are a whole lot more complicated than their static
- counterparts.
-
- First of all, we get some Haskell code, which, when given a
- function callMe :: (Int -> Int -> IO Int) to be made
- C-callable, IO-returns a FunPtr a, which is the
- address of the resulting C-callable code. This address can now be
- handed out to the C-world, and callers to it will get routed
- through to callMe.
-
- The generated Haskell function looks like this:
-
-
-mkCallback f
- = do sp <- mkStablePtr f
- r <- ccall "createAdjustorThunk" sp (&"run_mkCallback")
- return r
-
-
- createAdjustorThunk is a gruesome,
- architecture-specific function in the RTS. It takes a stable
- pointer to the Haskell function to be run, and the address of the
- associated C wrapper, and returns a piece of machine code,
- which, when called from the outside (C) world, eventually calls
- through to f.
-
- This machine code fragment is called the "Adjustor Thunk" (don't
- ask me why). What it does is simply to call onwards to the C
- helper
- function run_mkCallback, passing all the args given
- to it but also conveying sp, which is a stable
- pointer
- to the Haskell function to run. So:
-
-
-createAdjustorThunk ( StablePtr sp, CCodeAddress addr_of_helper_C_fn )
-{
- create malloc'd piece of machine code "mc", behaving thusly:
-
- mc ( args_to_mc )
- {
- jump to addr_of_helper_C_fn, passing sp as an additional
- argument
- }
-
-
- This is a horrible hack, because there is no portable way, even at
-      the machine code level, to write a function which adds one argument and
- then transfers onwards to another C function. On x86s args are
- pushed R to L onto the stack, so we can just push sp,
- fiddle around with return addresses, and jump onwards to the
- helper C function. However, on architectures which use register
- windows and/or pass args extensively in registers (Sparc, Alpha,
- MIPS, IA64), this scheme borders on the unviable. GHC has a
- limited createAdjustorThunk implementation for Sparc
- and Alpha, which handles only the cases where all args, including
- the extra one, fit in registers.
-
- Anyway: the other lump of code generated as a result of a
- f-x-dynamic declaration is the C helper stub. This is basically
- the same as in the static case, except that it only ever gets
- called from the adjustor thunk, and therefore must accept
- as an extra argument, a stable pointer to the Haskell function
- to run, naturally enough, as this is not known until run-time.
- It then dereferences the stable pointer and does the call in
- the same way as the f-x-static case:
-
- Note how this function has a purely made-up name
- Main_d1kv, since unlike the f-x-static case, this
- function is never called from user code, only from the adjustor
- thunk.
-
- Note also how the function takes a bogus parameter
- original_return_addr, which is part of this extra-arg
- hack. The usual scheme is to leave the original caller's return
- address in place and merely push the stable pointer above that,
- hence the spare parameter.
-
- Finally, there is some extra trickery, detailed in
- ghc/rts/Adjustor.c, to get round the following
- problem: the adjustor thunk lives in mallocville. It is
- quite possible that the Haskell code will actually
- call free() on the adjustor thunk used to get to it
- -- because otherwise there is no way to reclaim the space used
- by the adjustor thunk. That's all very well, but it means that
- the C helper cannot return to the adjustor thunk in the obvious
- way, since we've already given it back using free().
- So we leave, on the C stack, the address of whoever called the
- adjustor thunk, and before calling the helper, mess with the stack
- such that when the helper returns, it returns directly to the
- adjustor thunk's caller.
-
- That's how the stdcall convention works. If the
- adjustor thunk has been called using the ccall
- convention, we return indirectly, via a statically-allocated
- yet-another-magic-piece-of-code, which takes care of removing the
- extra argument that the adjustor thunk pushed onto the stack.
- This is needed because in ccall-world, it is the
- caller who removes args after the call, and the original caller of
- the adjustor thunk has no way to know about the extra arg pushed
- by the adjustor thunk.
-
- You didn't really want to know all this stuff, did you?
-
-
-
-
- (4) dynamic export of an non-IO-typed function from some module MMM
-
- foreign export mkCallback :: (Int -> Int -> Int) -> IO (FunPtr a)
-
- (4) relates to (3) as (2) relates to (1), that is, it's identical,
- except the C stub uses rts_eval instead of
- rts_evalIO.
-
-
-
-
Some perspective on f-x-dynamic
-
- The only really horrible problem with f-x-dynamic is how the
- adjustor thunk should pass to the C helper the stable pointer to
- use. Ideally we would like this to be conveyed via some invisible
- side channel, since then the adjustor thunk could simply jump
- directly to the C helper, with no non-portable stack fiddling.
-
- Unfortunately there is no obvious candidate for the invisible
- side-channel. We've chosen to pass it on the stack, with the
- bad consequences detailed above. Another possibility would be to
- park it in a global variable, but this is non-reentrant and
- non-(OS-)thread-safe. A third idea is to put it into a callee-saves
- register, but that has problems too: the C helper may not use that
- register and therefore we will have trashed any value placed there
- by the caller; and there is no C-level portable way to read from
- the register inside the C helper.
-
- In short, we can't think of a really satisfactory solution. I'd
- vote for introducing some kind of OS-thread-local-state and passing
- it in there, but that introduces complications of its own.
-
- OS-thread-safety is of concern in the C stubs, whilst
- building up the expressions to run. These need to have exclusive
- access to the heap whilst allocating in it. Also, there needs to
- be some guarantee that no GC will happen in between the
- deRefStablePtr call and when rts_eval[IO]
- starts running. At the moment there are no guarantees for
- either property. This needs to be sorted out before the
- implementation can be regarded as fully safe to use.
-
-
-
- This isn't a coherent description of how GHCi works, sorry. What
- it is (currently) is a dumping ground for various bits of info
- pertaining to GHCi, which ought to be recorded somewhere.
-
-
Debugging the interpreter
-
- The usual symptom is that some expression / program crashes when
-    running on the interpreter (commonly), or gets weird results
- (rarely). Unfortunately, finding out what the problem really is
- has proven to be extremely difficult. In retrospect it may be
- argued a design flaw that GHC's implementation of the STG
- execution mechanism provides only the weakest of support for
- automated internal consistency checks. This makes it hard to
- debug.
-
- Execution failures in the interactive system can be due to
- problems with the bytecode interpreter, problems with the bytecode
- generator, or problems elsewhere. From the bugs seen so far,
- the bytecode generator is often the culprit, with the interpreter
- usually being correct.
-
- Here are some tips for tracking down interactive nonsense:
-
-
Find the smallest source fragment which causes the problem.
-
-
Using an RTS compiled with -DDEBUG (nb, that
- means the RTS from the previous stage!), run with +RTS
- -D2 to get a listing in great detail from the
- interpreter. Note that the listing is so voluminous that
- this is impractical unless you have been diligent in
- the previous step.
-
-
At least in principle, using the trace and a bit of GDB
- poking around at the time of death, you can figure out what
- the problem is. In practice you quickly get depressed at
- the hopelessness of ever making sense of the mass of
- details. Well, I do, anyway.
-
-
+RTS -D2 tries hard to print useful
- descriptions of what's on the stack, and often succeeds.
- However, it has no way to map addresses to names in
- code/data loaded by our runtime linker. So the C function
- ghci_enquire is provided. Given an address, it
- searches the loaded symbol tables for symbols close to that
- address. You can run it from inside GDB:
-
- In this case the enquired-about address is
- PrelBase_ZMZN_static_entry. If no symbols are
- close to the given addr, nothing is printed. Not a great
- mechanism, but better than nothing.
-
-
We have had various problems in the past due to the bytecode
- generator (compiler/ghci/ByteCodeGen.lhs) being
- confused about the true set of free variables of an
- expression. The compilation scheme for lets
- applies the BCO for the RHS of the let to its free
- variables, so if the free-var annotation is wrong or
- misleading, you end up with code which has wrong stack
- offsets, which is usually fatal.
-
-
The baseline behaviour of the interpreter is to interpret
- BCOs, and hand all other closures back to the scheduler for
- evaluation. However, this causes a huge number of expensive
- context switches, so the interpreter knows how to enter the
- most common non-BCO closure types by itself.
-
- These optimisations complicate the interpreter.
- If you think you have an interpreter problem, re-enable the
- define REFERENCE_INTERPRETER in
- ghc/rts/Interpreter.c. All optimisations are
- thereby disabled, giving the baseline
- I-only-know-how-to-enter-BCOs behaviour.
-
-
Following the traces is often problematic because execution
- hops back and forth between the interpreter, which is
- traced, and compiled code, which you can't see.
- Particularly annoying is when the stack looks OK in the
- interpreter, then compiled code runs for a while, and later
- we arrive back in the interpreter, with the stack corrupted,
- and usually in a completely different place from where we
- left off.
-
- If this is biting you baaaad, it may be worth copying
- sources for the compiled functions causing the problem, into
- your interpreted module, in the hope that you stay in the
- interpreter more of the time. Of course this doesn't work
- very well if you've defined
- REFERENCE_INTERPRETER in
- ghc/rts/Interpreter.c.
-
-
There are various commented-out pieces of code in
- Interpreter.c which can be used to get the
-        stack sanity-checked after every entry, and even after
- every bytecode instruction executed. Note that some
- bytecodes (PUSH_UBX) leave the stack in
- an unwalkable state, so the do_print_stack
- local variable is used to suppress the stack walk after
- them.
-
-
-
-
Useful stuff to know about the interpreter
-
- The code generation scheme is straightforward (naive, in fact).
- -ddump-bcos prints each BCO along with the Core it
- was generated from, which is very handy.
-
-
Simple lets are compiled in-line. For the general case, let
- v = E in ..., E is compiled into a new BCO which takes as
- args its free variables, and v is bound to AP(the new BCO,
- free vars of E).
-
-
cases as usual, become: push the return
- continuation, enter the scrutinee. There is some magic to
- make all combinations of compiled/interpreted calls and
- returns work, described below. In the interpreted case, all
- case alts are compiled into a single big return BCO, which
- commences with instructions implementing a switch tree.
-
-
- ARGCHECK magic
-
- You may find ARGCHECK instructions at the start of BCOs which
- don't appear to need them; case continuations in particular.
- These play an important role: they force objects which should
-    be evaluated to BCOs to actually be BCOs.
-
- Typically, there may be an application node somewhere in the heap.
- This is a thunk which when leant on turns into a BCO for a return
- continuation. The thunk may get entered with an update frame on
- top of the stack. This is legitimate since from one viewpoint
- this is an AP which simply reduces to a data object, so does not
- have functional type. However, once the AP turns itself into a
- BCO (so to speak) we cannot simply enter the BCO, because that
- expects to see args on top of the stack, not an update frame.
- Therefore any BCO which expects something on the stack above an
- update frame, even non-function BCOs, start with an ARGCHECK. In
- this case it fails, the update is done, the update frame is
- removed, and the BCO re-entered. Subsequent entries of the BCO of
- course go unhindered.
-
- The optimised (#undef REFERENCE_INTERPRETER) interpreter handles
- this case specially, so that a trip through the scheduler is
- avoided. When reading traces from +RTS -D2 -RTS, you
- may see BCOs which appear to execute their initial ARGCHECK insn
- twice. The first time it fails; the interpreter does the update
- immediately and re-enters with no further comment.
-
- This is all a bit ugly, and, as SimonM correctly points out, it
- would have been cleaner to make BCOs unpointed (unthunkable)
- objects, so that a pointer to something :: BCO#
- really points directly at a BCO.
-
- Stack management
-
- There isn't any attempt to stub the stack, minimise its growth, or
- generally remove unused pointers ahead of time. This is really
- due to laziness on my part, although it does have the minor
- advantage that doing something cleverer would almost certainly
- increase the number of bytecodes that would have to be executed.
- Of course we SLIDE out redundant stuff, to get the stack back to
- the sequel depth, before returning a HNF, but that's all. As
- usual this is probably a cause of major space leaks.
-
- Building constructors
-
- Constructors are built on the stack and then dumped into the heap
- with a single PACK instruction, which simply copies the top N
- words of the stack verbatim into the heap, adds an info table, and zaps N
- words from the stack. The constructor args are pushed onto the
- stack one at a time. One upshot of this is that unboxed values
- get pushed untaggedly onto the stack (via PUSH_UBX), because that's how they
- will be in the heap. That in turn means that the stack is not
- always walkable at arbitrary points in BCO execution, although
- naturally it is whenever GC might occur.
-
- Function closures created by the interpreter use the AP-node
- (tagged) format, so although their fields are similarly
- constructed on the stack, there is never a stack walkability
- problem.
-
- Unpacking constructors
-
- At the start of a case continuation, the returned constructor is
- unpacked onto the stack, which means that unboxed fields have to
- be tagged. Rather than burdening all such continuations with a
- complex, general mechanism, I split it into two. The
- allegedly-common all-pointers case uses a single UNPACK insn
- to fish out all fields with no further ado. The slow case uses a
- sequence of more complex UPK_TAG insns, one for each field (I
- think). This seemed like a good compromise to me.
-
- Perspective
-
- I designed the bytecode mechanism with the experience of both STG
- hugs and Classic Hugs in mind. The latter has a small
- set of bytecodes, a small interpreter loop, and runs amazingly
- fast considering the cruddy code it has to interpret. The former
- had a large interpretative loop with many different opcodes,
- including multiple minor variants of the same thing, which
- made it difficult to optimise and maintain, yet it performed more
- or less comparably with Classic Hugs.
-
- My design aims were therefore to minimise the interpreter's
- complexity whilst maximising performance. This means reducing the
- number of opcodes implemented, whilst reducing the number of insns
- despatched. In particular there are only two opcodes, PUSH_UBX
- and UPK_TAG, which deal with tags. STG Hugs had dozens of opcodes
- for dealing with tagged data. In cases where the common
- all-pointers case is significantly simpler (UNPACK) I deal with it
- specially. Finally, the number of insns executed is reduced a
- little by merging multiple pushes, giving PUSH_LL and PUSH_LLL.
- These opcode pairings were determined by using the opcode-pair
- frequency profiling stuff which is ifdef-d out in
- Interpreter.c. These significantly improve
- performance without having much effect on the uglyness or
- complexity of the interpreter.
-
- Overall, the interpreter design is something which turned out
- well, and I was pleased with it. Unfortunately I cannot say the
- same of the bytecode generator.
-
-
case returns between interpreted and compiled code
-
- Variants of the following scheme have been drifting around in GHC
- RTS documentation for several years. Since what follows is
- actually what is implemented, I guess it supersedes all other
- documentation. Beware; the following may make your brain melt.
- In all the pictures below, the stack grows downwards.
-
- Returning to interpreted code.
-
- Interpreted returns employ a set of polymorphic return infotables.
- Each element in the set corresponds to one of the possible return
- registers (R1, D1, F1) that compiled code will place the returned
- value in. In fact this is a bit misleading, since R1 can be used
- to return either a pointer or an int, and we need to distinguish
- these cases. So, supposing the set of return registers is {R1p,
- R1n, D1, F1}, there would be four corresponding infotables,
- stg_ctoi_ret_R1p_info, etc. In the pictures below we
- call them stg_ctoi_ret_REP_info.
-
- These return itbls are polymorphic, meaning that all 8 vectored
- return codes and the direct return code are identical.
-
- Before the scrutinee is entered, the stack is arranged like this:
-
- | |
- +--------+
- | BCO | -------> the return continuation BCO
- +--------+
- | itbl * | -------> stg_ctoi_ret_REP_info, with all 9 codes as follows:
- +--------+
- BCO* bco = Sp[1];
- push R1/F1/D1 depending on REP
- push bco
- yield to sched
-
- On entry, the interpreted continuation BCO expects the stack to look
- like this:
-
- | |
- +--------+
- | BCO | -------> the return continuation BCO
- +--------+
- | itbl * | -------> stg_ctoi_ret_REP_info, with all 9 codes as follows:
- +--------+
- : VALUE : (the returned value, shown with : since it may occupy
- +--------+ multiple stack words)
-
- A machine code return will park the returned value in R1/F1/D1,
- and enter the itbl on the top of the stack. Since it's our magic
- itbl, this pushes the returned value onto the stack, which is
- where the interpreter expects to find it. It then pushes the BCO
- (again) and yields. The scheduler removes the BCO from the top,
- and enters it, so that the continuation is interpreted with the
- stack as shown above.
-
- An interpreted return will create the value to return at the top
- of the stack. It then examines the return itbl, which must be
- immediately underneath the return value, to see if it is one of
- the magic stg_ctoi_ret_REP_info set. Since this is so,
- it knows it is returning to an interpreted continuation. It
- therefore simply enters the BCO which it assumes is immediately
- underneath the itbl on the stack.
-
-
- Returning to compiled code.
-
- Before the scrutinee is entered, the stack is arranged like this:
-
- The scrutinee value is then entered.
- The case continuation(s) expect the stack to look the same, with
- the returned HNF in a suitable return register, R1, D1, F1 etc.
-
- A machine code return knows whether it is doing a vectored or
- direct return, and, if the former, which vector element it is.
- So, for a direct return we jump to Sp[0], and for a
- vectored return, jump to ((CodePtr*)(Sp[0]))[ - ITBL_LENGTH
- - vector number ]. This is (of course) the scheme that
- compiled code has been using all along.
-
- An interpreted return will, as described just above, have examined
- the itbl immediately beneath the return value it has just pushed,
- and found it not to be one of the stg_ctoi_ret_REP_info set,
- so it knows this must be a return to machine code. It needs to
- pop the return value, currently on the stack, into R1/F1/D1, and
- jump through the info table. Unfortunately the first part cannot
- be accomplished directly since we are not in Haskellised-C world.
-
- We therefore employ a second family of magic infotables, indexed,
- like the first, on the return representation, and therefore with
- names of the form stg_itoc_ret_REP_info. (Note:
- itoc; the previous bunch were ctoi).
- This is pushed onto the stack (note, tagged values have their tag
- zapped), giving:
-
- | |
- +--------+
- | itbl * | -------> arbitrary machine code return itbl
- +--------+
- : VALUE : (the returned value, possibly multiple words)
- +--------+
- | itbl * | -------> stg_itoc_ret_REP_info, with code:
- +--------+
- pop myself (stg_itoc_ret_REP_info) off the stack
- pop return value into R1/D1/F1
- do standard machine code return to itbl at t.o.s.
-
- We then return to the scheduler, asking it to enter the itbl at
- t.o.s. When entered, stg_itoc_ret_REP_info removes
- itself from the stack, pops the return value into the relevant
- return register, and returns to the itbl to which we were trying
- to return in the first place.
-
- Amazingly enough, this stuff all actually works! Well, mostly ...
-
- Unboxed tuples: a Right Royal Spanner In The Works
-
- The above scheme depends crucially on having magic infotables
- stg_{itoc,ctoi}_ret_REP_info for each return
- representation REP. It unfortunately fails miserably
- in the face of unboxed tuple returns, because the set of required
- tables would be infinite; this despite the fact that for any given
- unboxed tuple return type, the scheme could be made to work fine.
-
- This is a serious problem, because it prevents interpreted
- code from doing IO-typed returns, since IO
- t is implemented as (# t, RealWorld# #) or
- thereabouts. This restriction in turn rules out FFI stuff in the
- interpreter. Not good.
-
- Although we have no way to make general unboxed tuples work, we
- can at least make IO-types work using the following
- ultra-kludgey observation: RealWorld# doesn't really
- exist and so has zero size, in compiled code. In turn this means
- that a type of the form (# t, RealWorld# #) has the
- same representation as plain t does. So the bytecode
- generator, whilst rejecting code with general unboxed tuple
- returns, recognises and accepts this special case. Which means
- that IO-typed stuff works in the interpreter. Just.
-
- If anyone asks, I will claim I was out of radio contact, on a
- 6-month walking holiday to the south pole, at the time this was
- ... er ... dreamt up.
-
-
-
-
- The GHC Commentary - Compiling and running the Main module
-
-
-
-
Compiling and running the Main module
-
-GHC allows you to determine which module contains the "main" function, and
-what that function is called, via the -fmain-is flag. The trouble is
-that the runtime system is fixed, so what symbol should it link to?
-
-The current solution is this. Suppose the main function is Foo.run.
-
-
-Then, when compiling module Foo, GHC adds an extra definition:
-
- :Main.main = runIO Foo.run
-
-Now the RTS can invoke :Main.main to start the program. (This extra
-definition is inserted in TcRnDriver.checkMain.)
-
-Before starting the program, though, the RTS also initialises the module tree
-by calling init_:Main, so when compiling the main module (Foo in this case),
-as well as generating init_Foo as usual, GHC also generates
-
- init_zcMain() { init_Foo; }
-
-This extra initialisation code is generated in CodeGen.mkModuleInit.
-
- The Evil Mangler (EM) is a Perl script invoked by the Glorious Driver after the C compiler (gcc) has
- translated the GHC-produced C code into assembly. Consequently, it is
- only of interest if -fvia-C is in effect (either explicitly
- or implicitly).
-
-
Its purpose
-
- The EM reads the assembly produced by gcc and re-arranges code blocks as
- well as nukes instructions that it considers non-essential. It
- derives its evilness from its utterly ad hoc, machine, compiler, and
- whatnot dependent design and implementation. More precisely, the EM
- performs the following tasks:
-
-
The code executed when a closure is entered is moved adjacent to
- that closure's infotable. Moreover, the order of the info table
- entries is reversed. Also, SRT pointers are removed from closures that
- don't need them (non-FUN, RET and THUNK ones).
-
Function prologue and epilogue code is removed. (GHC generated code
- manages its own stack and uses the system stack only for return
- addresses and during calls to C code.)
-
Certain code patterns are replaced by simpler code (eg, loads of
- fast entry points followed by indirect jumps are replaced by direct
- jumps to the fast entry point).
-
-
-
Implementation
-
- The EM is located in the Perl script ghc-asm.lprl.
- The script reads the .s file and chops it up into
- chunks (that's how they are actually called in the script) that
- roughly correspond to basic blocks. Each chunk is annotated with an
- educated guess about what kind of code it contains (e.g., infotable,
- fast entry point, slow entry point, etc.). The annotations also contain
- the symbol introducing the chunk of assembly and whether that chunk has
- already been processed or not.
-
- The parsing of the input into chunks as well as recognising assembly
- instructions that are to be removed or altered is based on a large
- number of Perl regular expressions sprinkled over the whole code. These
- expressions are rather fragile as they heavily rely on the structure of
- the generated code - in fact, they even rely on the right amount of
- white space and thus on the formatting of the assembly.
-
- Afterwards, the chunks are reordered, some of them purged, and some
- stripped of some useless instructions. Moreover, some instructions are
- manipulated (eg, loads of fast entry points followed by indirect jumps
- are replaced by direct jumps to the fast entry point).
-
- The EM knows which part of the code belongs to function prologues and
- epilogues as STG C adds tags of the
- form --- BEGIN --- and --- END --- the
- assembler just before and after the code proper of a function starts.
- It adds these tags using gcc's __asm__ feature.
-
- Update: Gcc 2.96 upwards performs more aggressive basic
- block re-ordering and dead code elimination. This seems to make the
- whole --- END --- tag business redundant -- in fact, if
- proper code is generated, no --- END --- tags survive gcc
- optimiser.
-
-
-
-Last modified: Sun Feb 17 17:55:47 EST 2002
-
-
-
-
diff --git a/docs/comm/the-beast/modules.html b/docs/comm/the-beast/modules.html
deleted file mode 100644
index a6655a68a760..000000000000
--- a/docs/comm/the-beast/modules.html
+++ /dev/null
@@ -1,80 +0,0 @@
-
-
-
-
- The GHC Commentary - Modules, ModuleNames and Packages
-
-
-
-
Modules, ModuleNames and Packages
-
-
This section describes the datatypes ModuleName
- Module and PackageName all available
- from the module Module.
-
-
Packages
-
-
A package is a collection of (zero or more) Haskell modules,
- together with some information about external libraries, extra C
- compiler options, and other things that this collection of modules
- requires. When using DLLs on windows (or shared libraries on a
- Unix system; currently unsupported), a package can consist of only
- a single shared library of Haskell code; the reason for this is
- described below.
-
-
Packages are further described in the User's Guide here.
-
-
The ModuleName type
-
-
At the bottom of the hierarchy is a ModuleName,
- which, as its name suggests, is simply the name of a module. It
- is represented as a Z-encoded FastString, and is an instance of
- Uniquable so we can build FiniteMaps
- with ModuleNames as the keys.
-
-
A ModuleName can be built from a
- String, using the mkModuleName function.
-
-
The Module type
-
-
For a given module, the compiler also needs to know whether the
- module is in the home package, or in another package.
- This distinction is important for two reasons:
-
-
-
When generating code to call a function in another package,
- the compiler might have to generate a cross-DLL call, which is
- different from an intra-DLL call (hence the restriction that the
- code in a package can only reside in a single DLL).
-
-
We avoid putting version information in an interface file
- for entities defined in another package, on the grounds that other
- packages are generally "stable". This also helps keep the size of
- interface files down.
-
-
-
The Module type contains a ModuleName
- and a PackageInfo field. The
- PackageInfo indicates whether the given
- Module comes from the current package or from another
- package.
-
-
To get the actual package in which a given module resides, you
- have to read the interface file for that module, which contains
- the package name (actually the value of the
- -package-name flag when that module was built). This
- information is currently unused inside the compiler, but we might
- make use of it in the future, especially with the advent of
- hierarchical modules, to allow the compiler to automatically
- figure out which packages a program should be linked with, and
- thus avoid the need to specify -package options on
- the command line.
-
-
Modules are also instances of
- Uniquable, and indeed the unique of a
- Module is the same as the unique of the underlying
- ModuleName.
-
-
diff --git a/docs/comm/the-beast/names.html b/docs/comm/the-beast/names.html
deleted file mode 100644
index 061fae3ebfef..000000000000
--- a/docs/comm/the-beast/names.html
+++ /dev/null
@@ -1,169 +0,0 @@
-
-
-
-
- The GHC Commentary - The truth about names: OccNames, and Names
-
-
-
-
The GHC Commentary - The truth about names: OccNames, and Names
-
- Every entity (type constructor, class, identifier, type variable) has a
- Name. The Name type is pervasive in GHC, and
- is defined in basicTypes/Name.lhs. Here is what a Name
- looks like, though it is private to the Name module.
-
-
-
-data Name = Name {
- n_sort :: NameSort, -- What sort of name it is
- n_occ :: !OccName, -- Its occurrence name
- n_uniq :: Unique, -- Its identity
- n_loc :: !SrcLoc -- Definition site
- }
-
-
-
The n_sort field says what sort of name this is: see
- NameSort below.
-
The n_occ field gives the "occurrence name" of the
- Name; see
- OccName below.
-
The n_uniq field allows fast tests for equality of
- Names.
-
The n_loc field gives some indication of where the
- name was bound.
-
-data NameSort
- = External Module (Maybe Name)
- -- (Just parent) => this Name is a subordinate name of 'parent'
- -- e.g. data constructor of a data type, method of a class
- -- Nothing => not a subordinate
-
- | WiredIn Module (Maybe Name) TyThing BuiltInSyntax
- -- A variant of External, for wired-in things
-
- | Internal -- A user-defined Id or TyVar
- -- defined in the module being compiled
-
- | System -- A system-defined Id or TyVar. Typically the
- -- OccName is very uninformative (like 's')
-
-
-
Here are the sorts of Name an entity can have:
-
-
Class, TyCon: External.
-
Id: External, Internal, or System.
-
TyVar: Internal, or System.
-
-
-
An External name has a globally-unique
- (module name, occurrence name) pair, namely the
- original name of the entity,
- describing where the thing was originally defined. So for example,
- if we have
-
-
-module M where
- f = e1
- g = e2
-
-module A where
- import qualified M as Q
- import M
- a = Q.f + g
-
-
- then the RdrNames for "a", "Q.f" and "g" get replaced (by the
- Renamer) by the Names "A.a", "M.f", and "M.g" respectively.
-
-
-
An InternalName
- has only an occurrence name. Distinct InternalNames may have the same
- occurrence name; use the Unique to distinguish them.
-
-
An ExternalName has a unique that never changes. It
- is never cloned. This is important, because the simplifier invents
- new names pretty freely, but we don't want to lose the connection
- with the type environment (constructed earlier). An
- InternalName name can be cloned freely.
-
-
Before CoreTidy: the Ids that were defined at top
- level in the original source program get ExternalNames,
- whereas extra top-level bindings generated (say) by the type checker
- get InternalNames. This distinction is occasionally
- useful for filtering diagnostic output; e.g. for -ddump-types.
-
-
After CoreTidy: An Id with an
- ExternalName will generate symbols that
- appear as external symbols in the object file. An Id with an
- InternalName cannot be referenced from outside the
- module, and so generates a local symbol in the object file. The
- CoreTidy pass makes the decision about which names should be External
- and which Internal.
-
-
A System name is for the most part the same as an
- Internal. Indeed, the differences are purely cosmetic:
-
-
Internal names usually come from some name the
- user wrote, whereas a System name has an OccName like "a", or "t".
- Usually there are masses of System names with the same OccName but
- different uniques, whereas typically there are only a handful of
- distinct Internal names with the same OccName.
-
-
Another difference is that when unifying the type checker tries
- to unify away type variables with System names, leaving ones with
- Internal names (to improve error messages).
-
- An OccName is more-or-less just a string, like "foo" or
- "Tree", giving the (unqualified) name of an entity.
-
-
- Well, not quite just a string, because in Haskell a name like "C" could
- mean a type constructor or data constructor, depending on context. So
- GHC defines a type OccName (defined in
- basicTypes/OccName.lhs) that is a pair of a FastString
- and a NameSpace indicating which name space the name is drawn
- from:
-
-
-data OccName = OccName NameSpace EncodedFS
-
-
- The EncodedFS is a synonym for FastString indicating
- that the string is Z-encoded. (Details in OccName.lhs.)
- Z-encoding encodes funny characters like '%' and '$' into alphabetic
- characters, like "zp" and "zd", so that they can be used in object-file
- symbol tables without confusing linkers and suchlike.
-
-
- The name spaces are:
-
-
-
VarName: ordinary variables
-
TvName: type variables
-
DataName: data constructors
-
TcClsName: type constructors and classes (in Haskell they
- share a name space)
- On some platforms (currently x86 and PowerPC, with bitrotted
- support for Sparc and Alpha), GHC can generate assembly code
- directly, without having to go via C. This can sometimes almost
- halve compilation time, and avoids the fragility and
- horribleness of the mangler. The NCG
- is enabled by default for
- non-optimising compilation on supported platforms. For most programs
- it generates code which runs only 1-3% slower
- (depending on platform and type of code) than that
- created by gcc on x86s, so it is well worth using even with
- optimised compilation. FP-intensive x86 programs see a bigger
- slowdown, and all Sparc code runs about 5% slower due to
- us not filling branch delay slots.
-
- The NCG has always been something of a second-class citizen
- inside GHC, an unloved child, rather. This means that its
- integration into the compiler as a whole is rather clumsy, which
- brings some problems described below. That apart, the NCG
- proper is fairly cleanly designed, as target-independent as it
- reasonably can be, and so should not be difficult to retarget.
-
- NOTE! The native code generator was largely rewritten as part
- of the C-- backend changes, around May 2004. Unfortunately the
- rest of this document still refers to the old version, and was written
- with relation to the CVS head as of end-Jan 2002. Some of it is relevant,
- some of it isn't.
-
-
- The returned SDoc is for debugging, so is empty unless
- you specify -ddump-stix. The Pretty.Doc
- bit is the final assembly code. Translation involves three main
- phases, the first and third of which are target-independent.
-
-
Translation into the Stix representation. Stix
- is a simple tree-like RTL-style language, in which you can
- mention:
-
-
-
An infinite number of temporary, virtual registers.
-
The STG "magic" registers (MagicId), such as
- the heap and stack pointers.
-
Literals and low-level machine ops (MachOp).
-
Simple address computations.
-
Reads and writes of: memory, virtual regs, and various STG
- regs.
-
Labels and if ... goto ... style control-flow.
-
-
- Stix has two main associated types:
-
-
-
StixStmt -- trees executed for their side
- effects: assignments, control transfers, and auxiliary junk
- such as segment changes and literal data.
-
StixExpr -- trees which denote a value.
-
-
- Translation into Stix is almost completely
- target-independent. Needed dependencies are knowledge of
- word size and endianness, used when generating code to do
- deal with half-word fields in info tables. This could be
- abstracted out easily enough. Also, the Stix translation
- needs to know which MagicIds map to registers
- on the given target, and which are stored in offsets from
- BaseReg.
-
- After initial Stix generation, the trees are cleaned up with
- constant-folding and a little copy-propagation ("Stix
- inlining", as the code misleadingly calls it). We take
- the opportunity to translate MagicIds which are
- stored in memory on the given target, into suitable memory
- references. Those which are stored in registers are left
- alone. There is also a half-hearted attempt to lift literal
- strings to the top level in cases where nested strings have
- been observed to give incorrect code in the past.
-
- Primitive machine-level operations will already be phrased in
- terms of MachOps in the presented Abstract C, and
- these are passed through unchanged. We comment only that the
- MachOps have been chosen so as to be easy to
- implement on all targets, and their meaning is intended to be
- unambiguous, and the same on all targets, regardless of word
- size or endianness.
-
- A note on MagicIds.
- Those which are assigned to
- registers on the current target are left unmodified. Those
- which are not are stored in memory as offsets from
- BaseReg (which is assumed to permanently have the
- value (&MainCapability.r)), so the constant folder
- calculates the offsets and inserts suitable loads/stores. One
- complication is that not all archs have BaseReg
- itself in a register, so for those (sparc), we instead
- generate the address as an offset from the static symbol
- MainCapability, since the register table lives in
- there.
-
- Finally, BaseReg does occasionally itself get
- mentioned in Stix expression trees, and in this case what is
- denoted is precisely (&MainCapability.r), not, as
- in all other cases, the value of memory at some offset from
- the start of the register table. Since what it denotes is an
- r-value and not an l-value, assigning BaseReg is
- meaningless, so the machinery checks to ensure this never
- happens. All these details are taken into account by the
- constant folder.
-
-
Instruction selection. This is the only majorly
- target-specific phase. It turns Stix statements and
- expressions into sequences of Instr, a data
- type which is different for each architecture.
- Instr, unsurprisingly, has various supporting
- types, such as Reg, Operand,
- Imm, etc. The generated instructions may refer
- to specific machine registers, or to arbitrary virtual
- registers, either those created within the instruction
- selector, or those mentioned in the Stix passed to it.
-
- The instruction selectors live in MachCode.lhs.
- The core functions, for each target, are:
-
- The insn selectors use the "maximal munch" algorithm. The
- bizarrely-misnamed getRegister translates
- expressions. A simplified version of its type is:
-
- That is: it (monadically) turns a StixExpr into a
- sequence of instructions, and a register, with the meaning
- that after executing the (possibly empty) sequence of
- instructions, the (possibly virtual) register will
- hold the resulting value. The real situation is complicated
- by the presence of fixed registers, and is detailed below.
-
- Maximal munch is a greedy algorithm and is known not to give
- globally optimal code sequences, but it is good enough, and
- fast and simple. Early incarnations of the NCG used something
- more sophisticated, but that is long gone now.
-
- Similarly, getAmode translates a value, intended
- to denote an address, into a sequence of insns leading up to
- a (processor-specific) addressing mode. This stuff could be
- done using the general getRegister selector, but
- would necessarily generate poorer code, because the calculated
- address would be forced into a register, which might be
- unnecessary if it could partially or wholly be calculated
- using an addressing mode.
-
- Finally, assignMem_IntCode and
- assignReg_IntCode create instruction sequences to
- calculate a value and store it in the given register, or at
- the given address. Because these guys translate a statement,
- not a value, they just return a sequence of insns and no
- associated register. Floating-point and 64-bit integer
- assignments have analogous selectors.
-
- Apart from the complexities of fixed vs floating registers,
- discussed below, the instruction selector is as simple
- as it can be. It looks long and scary but detailed
- examination reveals it to be fairly straightforward.
-
-
Register allocation. The register allocator,
- AsmRegAlloc.lhs takes sequences of
- Instrs which mention a mixture of real and
- virtual registers, and returns a modified sequence referring
- only to real ones. It is gloriously and entirely
- target-independent. Well, not exactly true. Instead it
- regards Instr (instructions) and Reg
- (virtual and real registers) as abstract types, to which it has
- the following interface:
-
- insnFuture is used to (re)construct the graph of
- all possible control transfers between the insns to be
- allocated. regUsage returns the sets of registers
- read and written by an instruction. And
- patchRegs is used to apply the allocator's final
- decision on virtual-to-real reg mapping to an instruction.
-
- Clearly these 3 fns have to be written anew for each
- architecture. They are defined in
- RegAllocInfo.lhs. Think twice, no, thrice,
- before modifying them: making false claims about insn
- behaviour will lead to hard-to-find register allocation
- errors.
-
- AsmRegAlloc.lhs contains detailed comments about
- how the allocator works. Here is a summary. The head honcho
-
- takes a list of instructions and a list of real registers
- available for allocation, and maps as many of the virtual regs
- in the input into real ones as it can. The returned
- Bool indicates whether or not it was
- successful. If so, that's the end of it. If not, the caller
- of allocUsingTheseRegs will attempt spilling.
- More of that later. What allocUsingTheseRegs
- does is:
-
-
-
Implicitly number each instruction by its position in the
- input list.
-
-
Using insnFuture, create the set of all flow
- edges -- possible control transfers -- within this set of
- insns.
-
-
Using regUsage and iterating around the flow
- graph from the previous step, calculate, for each virtual
- register, the set of flow edges on which it is live.
-
-
- Make a real-register commitment map, which gives the set
- of edges for which each real register is committed (in
- use). These sets are initially empty. For each virtual
- register, attempt to find a real register whose current
- commitment does not intersect that of the virtual
- register -- ie, is uncommitted on all edges that the
- virtual reg is live. If successful, this means the vreg
- can be assigned to the realreg, so add the vreg's set to
- the realreg's commitment.
-
-
If all the vregs were assigned to a realreg, use
- patchInstr to apply the mapping to the insns themselves.
-
-
- Spilling
-
- If allocUsingTheseRegs fails, a baroque
- mechanism comes into play. We now know that much simpler
- schemes are available to do the same thing and give better
- results.
- Anyways:
-
- The logic above allocUsingTheseRegs, in
- doGeneralAlloc and runRegAllocate,
- observe that allocation has failed with some set R of real
- registers. So they apply runRegAllocate a second
- time to the code, but remove (typically) two registers from R
- before doing so. This naturally fails too, but returns a
- partially-allocated sequence. doGeneralAlloc
- then inserts spill code into the sequence, and finally re-runs
- allocUsingTheseRegs, but supplying the original,
- unadulterated R. This is guaranteed to succeed since the two
- registers previously removed from R are sufficient to allocate
- all the spill/restore instructions added.
-
- Because x86 is very short of registers, and in the worst case
- needs three removed from R, a softly-softly approach is used.
- doGeneralAlloc first tries with zero regs removed
- from R, then if that fails one, then two, etc. This means
- allocUsingTheseRegs may get run several times
- before a successful arrangement is arrived at.
- findReservedRegs cooks up the sets of spill
- registers to try with.
-
- The resulting machinery is complicated and the generated spill
- code is appalling. The saving grace is that spills are very
- rare so it doesn't matter much. I did not invent this -- I inherited it.
-
- Dealing with common cases fast
-
- The entire reg-alloc mechanism described so far is general and
- correct, but expensive overkill for many simple code blocks.
- So to begin with we use
- doSimpleAlloc, which attempts to do something
- simple. It exploits the observation that if the total number
- of virtual registers does not exceed the number of real ones
- available, we can simply dole out a new realreg each time we
- see mention of a new vreg, with no regard for control flow.
- doSimpleAlloc therefore attempts this in a
- single pass over the code. It gives up if it runs out of real
- regs or sees any condition which renders the above observation
- invalid (fixed reg uses, for example).
-
- This clever hack handles the majority of code blocks quickly.
- It was copied from the previous reg-allocator (the
- Mattson/Partain/Marlow/Gill one).
-
-
-
-
Complications, observations, and possible improvements
-
-
Real vs virtual registers in the instruction selectors
-
-The instruction selectors for expression trees, namely
-getRegister, are complicated by the fact that some
-expressions can only be computed into a specific register, whereas
-the majority can be computed into any register. We take x86 as an
-example, but the problem applies to all archs.
-
-Terminology: rreg means real register, a real machine
-register. vreg means one of an infinite set of virtual
-registers. The type Reg is the sum of rreg and
-vreg. The instruction selector generates sequences with
-unconstrained use of vregs, leaving the register allocator to map them
-all into rregs.
-
-Now, where was I ? Oh yes. We return to the type of
-getRegister, which despite its name, selects instructions
-to compute the value of an expression tree.
-
- getRegister :: StixExpr -> NatM Register
-
- data Register
- = Fixed PrimRep Reg InstrBlock
- | Any PrimRep (Reg -> InstrBlock)
-
- type InstrBlock -- sequence of instructions
-
-At first this looks eminently reasonable (apart from the stupid
-name). getRegister, and nobody else, knows whether or
-not a given expression has to be computed into a fixed rreg or can be
-computed into any rreg or vreg. In the first case, it returns
-Fixed and indicates which rreg the result is in. In the
-second case it defers committing to any specific target register by
-returning a function from Reg to InstrBlock,
-and the caller can specify the target reg as it sees fit.
-
-Unfortunately, that forces getRegister's callers (usually
-itself) to use a clumsy and confusing idiom in the common case where
-they do not care what register the result winds up in. The reason is
-that although a value might be computed into a fixed rreg, we are
-forbidden (on pain of segmentation fault :) from subsequently
-modifying the fixed reg. This and other rules are recorded in "Rules of
-the game" inside MachCode.lhs.
-
-Why can't fixed registers be modified post-hoc? Consider a simple
-expression like Hp+1. Since the heap pointer
-Hp is definitely in a fixed register, call it R,
-getRegister on subterm Hp will simply return
-Fixed with an empty sequence and R. But we can't just
-emit an increment instruction for R, because that trashes
-Hp; instead we first have to copy it into a fresh vreg
-and increment that.
-
-With all that in mind, consider now writing a getRegister
-clause for terms of the form (1 + E). Contrived, yes,
-but illustrates the matter. First we do
-getRegister on E. Now we are forced to examine what
-comes back.
-
- getRegister (OnePlus e)
- = getRegister e `thenNat` \ e_result ->
- case e_result of
- Fixed e_code e_fixed
- -> returnNat (Any IntRep (\dst -> e_code ++ [MOV e_fixed dst, INC dst]))
- Any e_any
- -> Any (\dst -> e_any dst ++ [INC dst])
-
-This seems unreasonably cumbersome, yet the instruction selector is
-full of such idioms. A good example of the complexities induced by
-this scheme is shown by trivialCode for x86 in
-MachCode.lhs. This deals with general integer dyadic
-operations on x86 and has numerous cases. It was difficult to get
-right.
-
-An alternative suggestion is to simplify the type of
-getRegister to this:
-
- getRegister :: StixExpr -> NatM (InstrBlock, VReg)
- type VReg = .... a vreg ...
-
-which is about as straightforward as you could hope for.
-Unfortunately, it requires getRegister to insert moves of
-values which naturally compute into an rreg, into a vreg. Consider:
-
- 1 + ccall some-C-fn
-
-On x86 the ccall result is returned in rreg %eax. The
-resulting sequence, prior to register allocation, would be:
-
-If, as is likely, %eax is not held live beyond this point
-for any other purpose, the move into a fresh register is pointless;
-we'd have been better off leaving the value in %eax as
-long as possible.
-
-The simplified getRegister story is attractive. It would
-clean up the instruction selectors significantly and make it simpler
-to write new ones. The only drawback is that it generates redundant
-register moves. I suggest that eliminating these should be the job
-of the register allocator. Indeed:
-
-
There has been some work on this already ("Iterated register
- coalescing" ?), so this isn't a new idea.
-
-
You could argue that the existing scheme inappropriately blurs the
- boundary between the instruction selector and the register
- allocator. The instruction selector should .. well .. just
- select instructions, without having to futz around worrying about
- what kind of registers subtrees get generated into. Register
- allocation should be entirely the domain of the register
- allocator, with the proviso that it should endeavour to allocate
- registers so as to minimise the number of non-redundant reg-reg
- moves in the final output.
-
-
-
-
Selecting insns for 64-bit values/loads/stores on 32-bit platforms
-
-Note that this stuff doesn't apply on 64-bit archs, since the
-getRegister mechanism applies there.
-
-The relevant functions are:
-
-iselExpr64 is the 64-bit, plausibly-named analogue of
-getRegister, and ChildCode64 is the analogue
-of Register. The aim here was to generate working 64
-bit code as simply as possible. To this end, I used the
-simplified getRegister scheme described above, in which
-iselExpr64 generates its results into two vregs which
-can always safely be modified afterwards.
-
-Virtual registers are, unsurprisingly, distinguished by their
-Uniques. There is a small difficulty in how to
-know what the vreg for the upper 32 bits of a value is, given the vreg
-for the lower 32 bits. The simple solution adopted is to say that
-any low-32 vreg may also have a hi-32 counterpart which shares the
-same unique, but is otherwise regarded as a separate entity.
-getHiVRegFromLo gets one from the other.
-
- data VRegUnique
- = VRegUniqueLo Unique -- lower part of a split quantity
- | VRegUniqueHi Unique -- upper part thereof
-
-Apart from that, 64-bit code generation is really simple. The sparc
-and x86 versions are almost copy-n-pastes of each other, with minor
-adjustments for endianness. The generated code isn't wonderful but
-is certainly acceptable, and it works.
-
-
-
-
Shortcomings and inefficiencies in the register allocator
-
-
Redundant reconstruction of the control flow graph
-
-The allocator goes to considerable computational expense to construct
-all the flow edges in the group of instructions it's allocating for,
-by using the insnFuture function in the
-Instr pseudo-abstract type.
-
-This is really silly, because all that information is present at the
-abstract C stage, but is thrown away in the translation to Stix.
-So a good thing to do is to modify that translation to
-produce a directed graph of Stix straight-line code blocks,
-and to preserve that structure through the insn selector, so the
-allocator can see it.
-
-This would eliminate the fragile, hacky, arch-specific
-insnFuture mechanism, and probably make the whole
-compiler run measurably faster. Register allocation is a fair chunk
-of the time of non-optimising compilation (10% or more), and
-reconstructing the flow graph is an expensive part of reg-alloc.
-It would probably accelerate the vreg liveness computation too.
-
-
Really ridiculous method for doing spilling
-
-This is a more ambitious suggestion, but ... reg-alloc should be
-reimplemented, using the scheme described in "Quality and speed in
-linear-scan register allocation." (Traub?) For straight-line code
-blocks, this gives an elegant one-pass algorithm for assigning
-registers and creating the minimal necessary spill code, without the
-need for reserving spill registers ahead of time.
-
-I tried it in Rigr, replacing the previous spiller which used the
-current GHC scheme described above, and it cut the number of spill
-loads and stores by a factor of eight. Not to mention being simpler,
-easier to understand and very fast.
-
-The Traub paper also describes how to extend their method to multiple
-basic blocks, which will be needed for GHC. It comes down to
-reconciling multiple vreg-to-rreg mappings at points where control
-flow merges.
-
-
Redundant-move support for revised instruction selector suggestion
-
-As mentioned above, simplifying the instruction selector will require
-the register allocator to try and allocate source and destination
-vregs to the same rreg in reg-reg moves, so as to make as many as
-possible go away. Without that, the revised insn selector would
-generate worse code than at present. I know this stuff has been done
-but know nothing about it. The Linear-scan reg-alloc paper mentioned
-above does indeed mention a bit about it in the context of single
-basic blocks, but I don't know if that's sufficient.
-
-
-
-
x86 arcana that you should know about
-
-The main difficulty with x86 is that many instructions have fixed
-register constraints, which can occasionally make reg-alloc fail
-completely. And the FPU doesn't have the flat register model which
-the reg-alloc abstraction (implicitly) assumes.
-
-Our strategy is: do a good job for the common small subset, that is
-integer loads, stores, address calculations, basic ALU ops (+, -,
-and, or, xor), and jumps. That covers the vast majority of
-executed insns. And indeed we do a good job, with a loss of
-less than 2% compared with gcc.
-
-Initially we tried to handle integer instructions with awkward
-register constraints (mul, div, shifts by non-constant amounts) via
-various jigglings of the spiller et al. This never worked robustly,
-and putting platform-specific tweaks in the generic infrastructure is
-a big No-No. (Not quite true; shifts by a non-constant amount are
-still done by a giant kludge, and should be moved into this new
-framework.)
-
-Fortunately, all such insns are rare. So the current scheme is to
-pretend that they don't have any such constraints. This fiction is
-carried all the way through the register allocator. When the insn
-finally comes to be printed, we emit a sequence which copies the
-operands through memory (%esp-relative), satisfying the
-constraints of the real instruction. This localises the gruesomeness
-to just one place. Here, for example, is the code generated for
-integer division of %esi by %ecx:
-
-This is not quite as appalling as it seems, if you consider that the
-division itself typically takes 16+ cycles, whereas the rest of the
-insns probably go through in about 1 cycle each.
-
-This trick is taken to extremes for FP operations.
-
-All notions of the x86 FP stack and its insns have been removed.
-Instead, we pretend, to the instruction selector and register
-allocator, that x86 has six floating point registers,
-%fake0 .. %fake5, which can be used in the
-usual flat manner. We further claim that x86 has floating point
-instructions very similar to SPARC and Alpha, that is, a simple
-3-operand register-register arrangement. Code generation and register
-allocation proceed on this basis.
-
-When we come to print out the final assembly, our convenient fiction
-is converted to dismal reality. Each fake instruction is
-independently converted to a series of real x86 instructions.
-%fake0 .. %fake5 are mapped to
-%st(0) .. %st(5). To do reg-reg arithmetic
-operations, the two operands are pushed onto the top of the FP stack,
-the operation done, and the result copied back into the relevant
-register. When one of the operands is also the destination, we emit a
-slightly less scummy translation. There are only six
-%fake registers because 2 are needed for the translation,
-and x86 has 8 in total.
-
-The translation is inefficient but is simple and it works. A cleverer
-translation would handle a sequence of insns, simulating the FP stack
-contents, would not impose a fixed mapping from %fake to
-%st regs, and hopefully could avoid most of the redundant
-reg-reg moves of the current translation.
-
-There are, however, two unforeseen bad side effects:
-
-
This doesn't work properly, because it doesn't observe the normal
- conventions for x86 FP code generation. It turns out that each of
- the 8 elements in the x86 FP register stack has a tag bit which
- indicates whether or not that register is notionally in use or
- not. If you do a FPU operation which happens to read a
- tagged-as-empty register, you get an x87 FPU (stack invalid)
- exception, which is normally handled by the FPU without passing it
- to the OS: the program keeps going, but the resulting FP values
- are garbage. The OS can ask for the FPU to pass it FP
- stack-invalid exceptions, but it usually doesn't.
-
- Anyways: inside NCG created x86 FP code this all works fine.
- However, the NCG's fiction of a flat register set does not operate
- the x87 register stack in the required stack-like way. When
- control returns to a gcc-generated world, the stack tag bits soon
- cause stack exceptions, and thus garbage results.
-
- The only fix I could think of -- and it is horrible -- is to clear
- all the tag bits just before the next STG-level entry, in chunks
- of code which use FP insns. i386_insert_ffrees
- inserts the relevant ffree insns into such code
- blocks. It depends critically on is_G_instr to
- detect such blocks.
-
-
It's very difficult to read the generated assembly and
- reason about it when debugging, because there's so much clutter.
- We print the fake insns as comments in the output, and that helps
- a bit.
-
-
-
-
-
Generating code for ccalls
-
-For reasons I don't really understand, the instruction selectors for
-generating calls to C (genCCall) have proven surprisingly
-difficult to get right, and soaked up a lot of debugging time. As a
-result, I have once again opted for schemes which are simple and not
-too difficult to argue as correct, even if they don't generate
-excellent code.
-
-The sparc ccall generator in particular forces all arguments into
-temporary virtual registers before moving them to the final
-out-registers (%o0 .. %o5). This creates
-some unnecessary reg-reg moves. The reason is explained in a
-comment in the code.
-
-
-
Duplicate implementation for many STG macros
-
-This has been discussed at length already. It has caused a couple of
-nasty bugs due to subtle untracked divergence in the macro
-translations. The macro-expander really should be pushed up into the
-Abstract C phase, so the problem can't happen.
-
-Doing so would have the added benefit that the NCG could be used to
-compile more "ways" -- well, at least the 'p' profiling way.
-
-
-
How to debug the NCG without losing your sanity/hair/cool
-
-Last, but definitely not least ...
-
-The usual syndrome is that some program, when compiled via C, works,
-but not when compiled via the NCG. Usually the problem is fairly
-simple to fix, once you find the specific code block which has been
-mistranslated. But the latter can be nearly impossible, since most
-modules generate at least hundreds and often thousands of them.
-
-My solution: cheat.
-
-Because the via-C and native routes diverge only late in the day,
-it is not difficult to construct a 1-1 correspondence between basic
-blocks on the two routes. So, if the program works via C but not on
-the NCG, do the following:
-
-
Recompile AsmCodeGen.lhs in the afflicted compiler
- with -DDEBUG_NCG, so that it inserts
- ___ncg_debug_markers
- into the assembly it emits.
-
-
Using a binary search on modules, find the module which is causing
- the problem.
-
-
Compile that module to assembly code, with identical flags, twice,
- once via C and once via NCG.
- Call the outputs ModuleName.s-gcc and
- ModuleName.s-nat. Check that the latter does indeed have
- ___ncg_debug_markers in it; otherwise the next steps fail.
-
-
Build (with a working compiler) the program
- fptools/ghc/utils/debugNCG/diff_gcc_nat.
-
-
Run: diff_gcc_nat ModuleName.s. This will
- construct the 1-1 correspondence, and emits on stdout
- a cppable assembly output. Place this in a file -- I always
- call it synth.S. Note, the capital S is important;
- otherwise it won't get cpp'd. You can feed this file directly to
- ghc and it will automatically get cpp'd; you don't have to do so
- yourself.
-
-
By messing with the #defines at the top of
- synth.S, do a binary search to find the incorrect
- block. Keep a careful record of where you are in the search; it
- is easy to get confused. Remember also that multiple blocks may
- be wrong, which also confuses matters. Finally, I usually start
- off by re-checking that I can build the executable with all the
- #defines set to 0 and then all to 1. This ensures
- you won't get halfway through the search and then get stuck due to
- some snafu with gcc-specific literals. Usually I set
- UNMATCHED_GCC to 1 all the time, and this bit should
- contain only literal data.
- UNMATCHED_NAT should be empty.
-
-
-diff_gcc_nat was known to work correctly last time I used
-it, in December 01, for both x86 and sparc. If it doesn't work, due
-to changes in assembly syntax, or whatever, make it work. The
-investment is well worth it. Searching for the incorrect block(s) any
-other way is a total time waster.
-
-
-
-
-
-
-
-
-
New global speculation-depth register; always counts the number of speculation frames
-on the stack; incremented when
-starting speculation, decremented when finishing.
-
Profiling stuff
-
-
-
-
Speculation frames
-
-The info table for a speculation frame points to the static spec-depth configuration
-for that speculation point. (Points to, because the config is mutable, and the info
-table has to be adjacent to the (immutable) code.)
-
-
-
-
Abortion
-
-Abortion is modelled by a special asynchronous exception ThreadAbort.
-
-
-
In the scheduler, if a thread returns with ThreadBlocked, and non-zero SpecDepth, send it
-an asynchronous exception.
-
-
In the implementation of the catch# primop, raise an asynchronous exception if
-SpecDepth is nonzero.
-
-
Timeout, administered by scheduler. Current story: abort if a speculation frame lasts from
-one minor GC to the next. We detect this by seeing if there's a profiling frame on the stack --- a
-profiling frame is added at a minor GC in place of a speculation frame (see Online Profiling).
-
-
-
-When tearing frames off the stack, we start a new chunk at every speculation frame, as well as every
-update frame. We proceed down to the deepest speculation frame.
-
-The AP_STACK closure built for a speculation frame must be careful not to enter the
-next AP_STACK closure up, because that would re-enter a possible loop.
-
-Delivering an asynch exception to a thread that is speculating. Invariant: there can be no catch frames
-inside speculation (we abort in catch# when speculating). So the asynch exception just
-tears off frames in the standard way until it gets to a catch frame, just as it would usually do.
-
-Abortion can punish one or more of the speculation frames by decrementing their static config variables.
-
-
Synchronous exceptions
-
-Synchronous exceptions are treated similarly as before. The stack is discarded up to an update frame; the
-thunk to be updated is overwritten with "raise x", and the process continues. Until a catch frame.
-
-When we find a spec frame, we allocate a "raise x" object, and resume execution with the return address
-in the spec frame. In that way the spec frame is like a catch frame; it stops the unwinding process.
-
-It's essential that every hard failure is caught, else speculation is unsafe. In particular, divide by zero
-is hard to catch using OS support, so we test explicitly in library code. You can shoot yourself in the foot
-by writing x `div#` 0, side-stepping the test.
-
-
-
Online profiling
-
-Sampling can be more frequent than minor GC (by jiggling the end-of-block code) but cannot
-be less frequent, because GC doesn't expect to see profiling frames.
\ No newline at end of file
diff --git a/docs/comm/the-beast/prelude.html b/docs/comm/the-beast/prelude.html
deleted file mode 100644
index 64b607def502..000000000000
--- a/docs/comm/the-beast/prelude.html
+++ /dev/null
@@ -1,207 +0,0 @@
-
-
-
-
- The GHC Commentary - Primitives and the Prelude
-
-
-
-
The GHC Commentary - Primitives and the Prelude
-
- One of the trickiest aspects of GHC is the delicate interplay
- between what knowledge is baked into the compiler, and what
- knowledge it gets by reading the interface files of library
- modules. In general, the less that is baked in, the better.
-
- Most of what the compiler has to have wired in about primitives and
- prelude definitions is in
- fptools/ghc/compiler/prelude/.
-
-
-GHC recognises these main classes of baked-in-ness:
-
-
Primitive types.
-
Primitive types cannot be defined in Haskell, and are utterly baked into the compiler.
-They are notionally defined in the fictional module GHC.Prim. The TyCons for these types are all defined
-in module TysPrim; for example,
-
- intPrimTyCon :: TyCon
- intPrimTyCon = ....
-
-Examples:
-Int#, Float#, Addr#, State#.
-
-
Wired-in types.
-
Wired-in types can be defined in Haskell, and indeed are (many are defined in GHC.Base).
-However, it's very convenient for GHC to be able to use the type constructor for (say) Int
-without looking it up in any environment. So module TysWiredIn contains many definitions
-like this one:
-
-However, since a TyCon value contains the entire type definition inside it, it follows
-that the complete definition of Int is thereby baked into the compiler.
-
-Nevertheless, the library module GHC.Base still contains a definition for Int
-just so that its info table etc get generated somewhere. Chaos will result if the wired-in definition
-in TysWiredIn differs from that in GHC.Base.
-
-The rule is that only very simple types should be wired in (for example, Ratio is not,
-and IO is certainly not). No class is wired in: classes are just too complicated.
-
-Examples: Int, Float, List, tuples.
-
-
-
Known-key things.
-
GHC knows of the existence of many, many other types, classes and values. But all it knows is
-their Name. Remember, a Name includes a unique key that identifies the
-thing, plus its defining module and occurrence name
-(see The truth about Names). Knowing a Name, therefore, GHC can
-run off to the interface file for the module and find out everything else it might need.
-
-Most of these known-key names are defined in module PrelNames; a further swathe concerning
-Template Haskell are defined in DsMeta. The allocation of unique keys is done manually;
-chaotic things happen if you make a mistake here, which is why they are all together.
-
-
-All the Names from all the above categories are used to initialise the global name cache,
-which maps (module,occurrence-name) pairs to the globally-unique Name for that
-thing. (See HscMain.initOrigNames.)
-
-
-The next sections elaborate these three classes a bit.
-
-
-
Primitives (module TysPrim)
-
- Some types and functions have to be hardwired into the compiler as they
- are atomic; all other code is essentially built around this primitive
- functionality. This includes basic arithmetic types, such as integers,
- and their elementary operations as well as pointer types. Primitive
- types and functions often receive special treatment in the code
- generator, which means that these entities have to be explicitly
- represented in the compiler. Moreover, many of these types receive some
- explicit treatment in the runtime system, and so, there is some further
- information about primitives in
- the RTS section of this document.
-
- The module TysPrim
- exports a list of all primitive type constructors as primTyCons ::
- [TyCon]. All of these type constructors (of type
- TyCon) are also exported as intPrimTyCon,
- stablePtrPrimTyCon, and so on. In addition, for each
- nullary type constructor the corresponding type (of type
- Type) is also exported; for example, we have
- intPrimTy :: Type. For all other type constructors, a
- function is exported that constructs the type obtained by applying the
- type constructors to an argument type (of type Type); for
- example, we have mkStablePtrPrimTy :: Type -> Type.
-
- As it is inconvenient to identify types that receive special treatment
- by the code generator by looking at their name, the module PrimRep
- exports a data type PrimRep, which lists all
- machine-manipulable implementation types. The module also exports a set
- of query functions on PrimRep that define properties, such
- as a type's byte size or whether a primitive type is a pointer type.
- Moreover, the function TysPrim.primRepTyCon :: PrimRep ->
- TyCon converts PrimRep values into the corresponding
- type constructor.
-
-
Wired in types (module TysWiredIn)
-
- In addition to entities that are primitive, as the compiler has to treat
- them specially in the backend, there is a set of types, functions,
- etc. that the Haskell language definition flags as essential to the
- language by placing them into the special module Prelude
- that is implicitly imported into each Haskell module. For some of these
- entities it suffices to define them (by standard Haskell definitions) in
- a Prelude module and ensuring that this module is treated
- specially by always being imported.
-
- However, there is a set of entities (such as, for example, the list type
- and the corresponding data constructors) that have an in-between status:
- They are not truly primitive (lists, for example, can easily be defined
- by a data declaration), but the compiler has to have extra
- knowledge about them, as they are associated with some particular
- features of the language (in the case of lists, there is special syntax,
- such as list comprehensions, associated with the type). Another
- example of a special kind of entity is type classes that can be used
- in a deriving clause. All types that are not-primitive,
- but about which the compiler nonetheless has to have some extra
- knowledge are defined in the module TysWiredIn.
-
- All wired in type constructors are contained in wiredInTyCons ::
- [TyCon]. In addition to that list, TysWiredIn
- exports variables bound to representations of all listed type
- constructors and their data constructors. So, for example, we have
- listTyCon together with nilDataCon and
- consDataCon. There are also convenience functions, such
- as mkListTy and mkTupleTy, which construct
- compound types.
-
-
-
Known-key names (module PrelNames)
-
- All names of types, functions, etc. known to the compiler are defined in
- PrelNames.
- This includes the names of types and functions exported from
- TysWiredIn, but also others. In particular, this module
- also fixes the names of all prelude modules; i.e., of the modules whose
- name starts with Prel, which GHC's library uses to bring
- some structure into the quite large number of Prelude
- definitions.
-
- PrelNames.knownKeyNames :: [Name] contains all names known
- to the compiler, but the elements of the list are also exported
- individually as variables, such as floatTyConName (having
- the lexeme Float) and floatDataConName (having
- the lexeme F#). For each of these names,
- PrelNames defines a unique key with a definition, such as
-
-
-floatPrimTyConKey = mkPreludeTyConUnique 11
-
-
- that is, all unique keys for known prelude names are hardcoded into
- PrelNames (and uniqueness has to be manually ensured in
- that module). To simplify matching the types of important groups of
- type constructors, PrelNames also exports lists, such as
- numericTyKeys (keys of all numeric types), that contain the
- unique keys of all names in that group. In addition, derivable type
- classes and their structure is defined by
- derivableClassKeys and related definitions.
-
- In addition to names that have unique keys, PrelNames also
- defines a set of names without uniqueness information. These names end
- on the suffix _RDR and are of type RdrName (an
- example, is times_RDR, which represents the lexeme
- *). The names are used in locations where they pass
- through the renamer anyway (e.g., special constructors encountered by
- the parser, such as [], and code generated from deriving clauses), which
- will take care of adding uniqueness information.
-
-
-
Gathering it all together (module PrelInfo)
- The module
- PrelInfo
- in some sense ties all the above together and provides a reasonably
- restricted interface to these definition to the rest of the compiler.
- However, from what I have seen, this doesn't quite work out and the
- earlier mentioned modules are directly imported in many places.
-
-
-
- The GHC Commentary - The Glorious Renamer
-
-
-
-
The GHC Commentary - The Glorious Renamer
-
- The renamer sits between the parser and the typechecker.
- However, its operation is quite tightly interwoven with the
- typechecker. This is partially due to support for Template Haskell,
- where spliced code has to be renamed and type checked. In particular,
- top-level splices lead to multiple rounds of renaming and type
- checking.
-
-
- The main externally used functions of the renamer are provided by the
- module rename/RnSource.lhs. In particular, we have
-
- All of which execute in the renamer monad RnM. The first
- function, rnSrcDecls renames a binding group; the second,
- rnTyClDecls renames a list of (toplevel) type and class
- declarations; and the third, rnSplice renames a Template
- Haskell splice. As the types indicate, the main task of the renamer is
- to convert all the RdrNames to Names, which includes a number of
- well-formedness checks (no duplicate declarations, all names are in
- scope, and so on). In addition, the renamer performs other, not
- strictly name-related, well-formedness checks, which includes checking
- that the appropriate flags have been supplied whenever language
- extensions are used in the source.
-
-
-
RdrNames
-
- A RdrName.RdrName is pretty much just a string (for an
- unqualified name like "f") or a pair of strings (for a
- qualified name like "M.f"):
-
-
-
-data RdrName
- = Unqual OccName
- -- Used for ordinary, unqualified occurrences
-
- | Qual Module OccName
- -- A qualified name written by the user in
- -- *source* code. The module isn't necessarily
- -- the module where the thing is defined;
- -- just the one from which it is imported
-
- | Orig Module OccName
- -- An original name; the module is the *defining* module.
- -- This is used when GHC generates code that will be fed
- -- into the renamer (e.g. from deriving clauses), but where
- -- we want to say "Use Prelude.map dammit".
-
- | Exact Name
- -- We know exactly the Name. This is used
- -- (a) when the parser parses built-in syntax like "[]"
- -- and "(,)", but wants a RdrName from it
- -- (b) when converting names to the RdrNames in IfaceTypes
- -- Here an Exact RdrName always contains an External Name
- -- (Internal Names are converted to simple Unquals)
- -- (c) by Template Haskell, when TH has generated a unique name
- Due to the tight integration of the renamer with the typechecker, both
- use the same monad in recent versions of GHC. So, we have
-
-
-
-type RnM a = TcRn a -- Historical
-type TcM a = TcRn a -- Historical
-
-
- with the combined monad defined as
-
-
-
-type TcRn a = TcRnIf TcGblEnv TcLclEnv a
-type TcRnIf a b c = IOEnv (Env a b) c
-
-data Env gbl lcl -- Changes as we move into an expression
- = Env {
- env_top :: HscEnv, -- Top-level stuff that never changes
- -- Includes all info about imported things
-
- env_us :: TcRef UniqSupply, -- Unique supply for local variables
-
- env_gbl :: gbl, -- Info about things defined at the top level
- -- of the module being compiled
-
- env_lcl :: lcl -- Nested stuff; changes as we go into
- -- an expression
- }
-
-
- the details of the global environment type TcGblEnv and
- local environment type TcLclEnv are also defined in the
- module typecheck/TcRnTypes.lhs. The monad
- IOEnv is defined in utils/IOEnv.hs and extends
- the vanilla IO monad with an additional state parameter
- env that is treated as in a reader monad. (Side effecting
- operations, such as updating the unique supply, are done with
- TcRefs, which are simply a synonym for IORefs.)
-
-
-
Name Space Management
-
- As anticipated by the variants Orig and Exact
- of RdrName some names should not change during renaming,
- whereas others need to be turned into unique names. In this context,
- the two functions RnEnv.newTopSrcBinder and
- RnEnv.newLocals are important:
-
-
-
-newTopSrcBinder :: Module -> Maybe Name -> Located RdrName -> RnM Name
-newLocalsRn :: [Located RdrName] -> RnM [Name]
-
-
- The two functions introduce new toplevel and new local names,
- respectively, where the first two arguments to
- newTopSrcBinder determine the currently compiled module and
- the parent construct of the newly defined name. Both functions create
- new names only for RdrNames that are neither exact nor
- original.
-
-
-
Introduction of Toplevel Names: Global RdrName Environment
-
- A global RdrName environment
- RdrName.GlobalRdrEnv is a map from OccNames to
- lists of qualified names. More precisely, the latter are
- Names with an associated Provenance:
-
-
-
-data Provenance
- = LocalDef -- Defined locally
- Module
-
- | Imported -- Imported
- [ImportSpec] -- INVARIANT: non-empty
- Bool -- True iff the thing was named *explicitly*
- -- in *any* of the import specs rather than being
- -- imported as part of a group;
- -- e.g.
- -- import B
- -- import C( T(..) )
- -- Here, everything imported by B, and the constructors of T
- -- are not named explicitly; only T is named explicitly.
- -- This info is used when warning of unused names.
-
-
- The part of the global RdrName environment for a module
- that contains the local definitions is created by the function
- RnNames.importsFromLocalDecls, which also computes a data
- structure recording all imported declarations in the form of a value of
- type TcRnTypes.ImportAvails.
-
-
- The function importsFromLocalDecls, in turn, makes use of
- RnNames.getLocalDeclBinders :: Module -> HsGroup RdrName -> RnM
- [AvailInfo] to extract all declared names from a binding group,
- where HscTypes.AvailInfo is essentially a collection of
- Names; i.e., getLocalDeclBinders, on the fly,
- generates Names from the RdrNames of all
- top-level binders of the module represented by the HsGroup
- RdrName argument.
-
-
- It is important to note that all this happens before the renamer
- actually descends into the toplevel bindings of a module. In other
- words, before TcRnDriver.rnTopSrcDecls performs the
- renaming of a module by way of RnSource.rnSrcDecls, it uses
- importsFromLocalDecls to set up the global
- RdrName environment, which contains Names for
- all imported and all locally defined toplevel binders. Hence,
- when the helpers of rnSrcDecls come across the
- defining occurrences of a toplevel RdrName, they
- don't rename it by generating a new name, but they simply look up its
- name in the global RdrName environment.
-
-
-
Rebindable syntax
-
- In Haskell when one writes "3" one gets "fromInteger 3", where
- "fromInteger" comes from the Prelude (regardless of whether the
- Prelude is in scope). If you want to completely redefine numbers,
- that becomes inconvenient. So GHC lets you say
- "-fno-implicit-prelude"; in that case, the "fromInteger" comes from
- whatever is in scope. (This is documented in the User Guide.)
-
-
- This feature is implemented as follows (I always forget).
-
-
Names that are implicitly bound by the Prelude, are marked by the
- type HsExpr.SyntaxExpr. Moreover, the association list
- HsExpr.SyntaxTable is set up by the renamer to map
- rebindable names to the value they are bound to.
-
-
Currently, five constructs related to numerals
- (HsExpr.NegApp, HsPat.NPat,
- HsPat.NPlusKPat, HsLit.HsIntegral, and
- HsLit.HsFractional) and
- two constructs related to do expressions
- (HsExpr.BindStmt and
- HsExpr.ExprStmt) have rebindable syntax.
-
-
When the parser builds these constructs, it puts in the
- built-in Prelude Name (e.g. PrelNum.fromInteger).
-
-
When the renamer encounters these constructs, it calls
- RnEnv.lookupSyntaxName.
- This checks for -fno-implicit-prelude; if not, it just
- returns the same Name; otherwise it takes the occurrence name of the
- Name, turns it into an unqualified RdrName, and looks it up in the
- environment. The returned name is plugged back into the construct.
-
-
The typechecker uses the Name to generate the appropriate typing
- constraints.
-
-
- The GHC Commentary - The Mighty Simplifier
-
-
-
-
The GHC Commentary - The Mighty Simplifier
-
- Most of the optimising program transformations applied by GHC are
- performed on an intermediate language called Core, which
- essentially is a compiler-friendly formulation of rank-2 polymorphic
- lambda terms defined in the module CoreSyn.lhs.
- The transformation engine optimising Core programs is called the
- Simplifier and composed from a couple of modules located in the
- directory fptools/ghc/compiler/simplCore/.
- The main engine of the simplifier is contained in Simplify.lhs.
- and its driver is the routine core2core in SimplCore.lhs.
-
- The program that the simplifier has produced after applying its various
- optimisations can be obtained by passing the option
- -ddump-simpl to GHC. Moreover, the various intermediate
- stages of the optimisation process are printed when passing
- -dverbose-core2core.
-
-
- The simplification process has to take special care when handling
- recursive binding groups; otherwise, the compiler might loop.
- Therefore, the routine reOrderRec in OccurAnal.lhs
- computes a set of loop breakers - a set of definitions that
- together cut any possible loop in the binding group. It marks the
- identifiers bound by these definitions as loop breakers by enriching
- their occurrence information. Loop
- breakers will never be inlined by the simplifier; thus,
- guaranteeing termination of the simplification procedure. (This is not
- entirely accurate -- see rewrite rules below.)
-
- The process of finding loop breakers works as follows: First, the
- strongly connected components (SCC) of the graph representing all
- function dependencies is computed. Then, each SCC is inspected in turn.
- If it contains only a single binding (self-recursive function), this is
- the loop breaker. In case of multiple recursive bindings, the function
- attempts to select bindings where the decision not to inline them
- causes the least harm - in the sense of inhibiting optimisations in the
- code. This is achieved by considering each binding in turn and awarding
- a score between 0 and 4, where a lower score means that the
- function is less useful for inlining - and thus, a better loop breaker.
- The evaluation of bindings is performed by the function
- score locally defined in OccurAnal.
-
- Note that, because core programs represent function definitions as
- one binding choosing between the possibly many equations in the
- source program with a case construct, a loop breaker cannot
- inline any of its possibly many alternatives (not even the non-recursive
- alternatives).
-
-
- The application of rewrite rules is controlled in the module Simplify.lhs
- by the function completeCall. This function first checks
- whether it should inline the function applied at the currently inspected
- call site, then simplifies the arguments, and finally, checks whether
- any rewrite rule can be applied (and also whether there is a matching
- specialised version of the applied function). The actual check for rule
- application is performed by the function Rules.lookupRule.
-
- It should be noted that the application of rewrite rules is not subject
- to the loop breaker check - i.e., rules of loop breakers will be applied
- regardless of whether this may cause the simplifier to diverge.
-
-
-
- The GHC Commentary - You Got Control: The STG-language
-
-
-
-
The GHC Commentary - You Got Control: The STG-language
-
- GHC contains two completely independent backends: the byte code
- generator and the machine code generator. The decision over which of
- the two is invoked is made in HscMain.hscCodeGen.
- The machine code generator proceeds itself in a number of phases: First,
- the Core intermediate language is translated
- into STG-language; second, STG-language is transformed into a
- GHC-internal variant of C--;
- and thirdly, this is either emitted as concrete C--, converted to GNU C,
- or translated to native code (by the native code
- generator which targets IA32, Sparc, and PowerPC [as of March '05]).
-
-
- In the following, we will have a look at the first step of machine code
- generation, namely the translation steps involving the STG-language.
- Details about the underlying abstract machine, the Spineless Tagless
- G-machine, are in Implementing
- lazy functional languages on stock hardware: the Spineless Tagless
- G-machine, SL Peyton Jones, Journal of Functional Programming 2(2),
- Apr 1992, pp127-202. (Some details have changed since the publication of
- this article, but it still gives a good introduction to the main
- concepts.)
-
-
-
The STG Language
-
- The AST of the STG-language and the generation of STG code from Core is
- both located in the stgSyn/
- directory; in the modules StgSyn
- and CoreToStg,
- respectively.
-
-
- Conceptually, the STG-language is a lambda calculus (including data
- constructors and case expressions) whose syntax is restricted to make
- all control flow explicit. As such, it can be regarded as a variant of
- administrative normal form (ANF). (C.f., The essence of compiling
- with continuations. Cormac Flanagan, Amr Sabry, Bruce F. Duba, and
- Matthias Felleisen. ACM SIGPLAN Conference on Programming Language
- Design and Implementation, ACM Press, 1993.) Each syntactic form
- has a precise operational interpretation, in addition to the
- denotational interpretation inherited from the lambda calculus. The
- concrete representation of the STG language inside GHC also includes
- auxiliary attributes, such as static reference tables (SRTs),
- which determine the top-level bindings referenced by each let binding
- and case expression.
-
-
- As usual in ANF, arguments to functions etc. are restricted to atoms
- (i.e., constants or variables), which implies that all sub-expressions
- are explicitly named and evaluation order is explicit. Specific to the
- STG language is that all let bindings correspond to closure allocation
- (thunks, function closures, and data constructors) and that case
- expressions encode both computation and case selection. There are two
- flavours of case expressions scrutinising boxed and unboxed values,
- respectively. The former perform function calls including demanding the
- evaluation of thunks, whereas the latter execute primitive operations
- (such as arithmetic on fixed size integers and floating-point numbers).
-
-
- The representation of STG language defined in StgSyn
- abstracts over both binders and occurrences of variables. The type names
- involved in this generic definition all carry the prefix
- Gen (such as in GenStgBinding). Instances of
- these generic definitions, where both binders and occurrences are of type
- Id.Id
- are defined as type synonyms and use type names that drop the
- Gen prefix (i.e., becoming plain StgBinding).
- Complete programs in STG form are represented by values of type
- [StgBinding].
-
-
-
From Core to STG
-
- Although, the actual translation from Core AST into STG AST is performed
- by the function CoreToStg.coreToStg
- (or CoreToStg.coreExprToStg
- for individual expressions), the translation crucially depends on CorePrep.corePrepPgm
- (resp. CorePrep.corePrepExpr),
- which prepares Core code for code generation (for both byte code and
- machine code generation). CorePrep saturates primitive and
- constructor applications, turns the code into A-normal form, renames all
- identifiers into globally unique names, generates bindings for
- constructor workers, constructor wrappers, and record selectors plus
- some further cleanup.
-
-
- In other words, after Core code is prepared for code generation it is
- structurally already in the form required by the STG language. The main
- work performed by the actual transformation from Core to STG, as
- performed by CoreToStg.coreToStg,
- is to compute the live and free variables as well as live CAFs (constant
- applicative forms) at each let binding and case alternative. In
- subsequent phases, the live CAF information is used to compute SRTs.
- The live variable information is used to determine which stack slots
- need to be zapped (to avoid space leaks) and the free variable
- information is needed to construct closures. Moreover, hints for
- optimised code generation are computed, such as whether a closure needs
- to be updated after it has been evaluated.
-
-
-
STG Passes
-
- These days little actual work is performed on programs in STG form; in
- particular, the code is not further optimised. All serious optimisation
- (except low-level optimisations which are performed during native code
- generation) has already been done on Core. The main task of CoreToStg.stg2stg
- is to compute SRTs from the live CAF information determined during STG
- generation. Other than that, SCCfinal.stgMassageForProfiling
- is executed when compiling for profiling and information may be dumped
- for debugging purposes.
-
-
-
Towards C--
-
- GHC's internal form of C-- is defined in the module Cmm.
- The definition is generic in that it abstracts over the type of static
- data and of the contents of basic blocks (i.e., over the concrete
- representation of constant data and instructions). These generic
- definitions have names carrying the prefix Gen (such as
- GenCmm). The same module also instantiates the generic
- form to a concrete form where data is represented by
- CmmStatic and instructions are represented by
- CmmStmt (giving us, e.g., Cmm from
- GenCmm). The concrete form more or less follows the
- external C-- language.
-
-
- Programs in STG form are translated to Cmm by CodeGen.codeGen
-
-
-
-
-Last modified: Sat Mar 5 22:55:25 EST 2005
-
-
-
-
diff --git a/docs/comm/the-beast/syntax.html b/docs/comm/the-beast/syntax.html
deleted file mode 100644
index be5bbefa17a7..000000000000
--- a/docs/comm/the-beast/syntax.html
+++ /dev/null
@@ -1,99 +0,0 @@
-
-
-
-
- The GHC Commentary - Just Syntax
-
-
-
-
- The lexer is a rather tedious piece of Haskell code contained in the
- module Lex.
- Its complexity partially stems from covering, in addition to Haskell 98,
- also the whole range of GHC language extensions plus its ability to
- analyse interface files in addition to normal Haskell source. The lexer
- defines a parser monad P a, where a is the
- type of the result expected from a successful parse. More precisely, a
- result of type
-
-data ParseResult a = POk PState a
- | PFailed Message
-
-
- is produced with Message being from ErrUtils
- (and currently is simply a synonym for SDoc).
-
- The record type PState contains information such as the
- current source location, buffer state, contexts for layout processing,
- and whether Glasgow extensions are accepted (either due to
- -fglasgow-exts or due to reading an interface file). Most
- of the fields of PState store unboxed values; in fact, even
- the flag indicating whether Glasgow extensions are enabled is
- represented by an unboxed integer instead of by a Bool. My
- (= chak's) guess is that this is to avoid having to perform a
- case on a boxed value in the inner loop of the lexer.
-
- The same lexer is used by the Haskell source parser, the Haskell
- interface parser, and the package configuration parser.
-
-
The Haskell Source Parser
-
- The parser for Haskell source files is defined in the form of a parser
- specification for the parser generator Happy in the file Parser.y.
- The parser exports three entry points for parsing entire modules
- (parseModule), individual statements
- (parseStmt), and individual identifiers
- (parseIdentifier), respectively. The last two are needed
- for GHCi. All three require a parser state (of type
- PState) and are invoked from HscMain.
-
- Parsing of Haskell is a rather involved process. The most challenging
- features are probably the treatment of layout and expressions that
- contain infix operators. The latter may be user-defined and so are not
- easily captured in a static syntax specification. Infix operators may
- also appear in the right hand sides of value definitions, and so, GHC's
- parser treats those in the same way as expressions. In other words, as
- general expressions are a syntactic superset of patterns - ok, they
- nearly are - the parser simply attempts to parse a general
- expression in such positions. Afterwards, the generated parse tree is
- inspected to ensure that the accepted phrase indeed forms a legal
- pattern. This and similar checks are performed by the routines from ParseUtil. In
- some cases, these routines do, in addition to checking for
- wellformedness, also transform the parse tree, such that it fits into
- the syntactic context in which it has been parsed; in fact, this happens
- for patterns, which are transformed from a representation of type
- RdrNameHsExpr into a representation of type
- RdrNamePat.
-
-
The Haskell Interface Parser
-
- The parser for interface files is also generated by Happy from ParseIface.y.
- Its main routine parseIface is invoked from RnHiFiles.readIface.
-
-
The Package Configuration Parser
-
- The parser for configuration files is by far the smallest of the three
- and defined in ParsePkgConf.y.
- It exports loadPackageConfig, which is used by DriverState.readPackageConf.
-
-
- Probably the most important phase in the frontend is the type checker,
- which is located at fptools/ghc/compiler/typecheck/.
- GHC type checks programs in their original Haskell form before the
- desugarer converts them into Core code. This complicates the type
- checker as it has to handle the much more verbose Haskell AST, but it
- improves error messages, as those messages are based on the same
- structure that the user sees.
-
-
- GHC defines the abstract syntax of Haskell programs in HsSyn
- using a structure that abstracts over the concrete representation of
- bound occurrences of identifiers and patterns. The module TcHsSyn
- defines a number of helper function required by the type checker. Note
- that the type TcRnTypes.TcId
- used to represent identifiers in some signatures during type checking
- is, in fact, nothing but a synonym for a plain
- Id.
-
-
- It is also noteworthy that the representation of types changes during
- type checking from HsType to TypeRep.Type.
- The latter is a hybrid type representation that
- is used to type Core, but still contains sufficient information to
- recover source types. In particular, the type checker maintains and
- compares types in their Type form.
-
-
-
The Overall Flow of Things
-
-
Entry Points Into the Type Checker
-
- The interface of the type checker (and renamer) to the rest of the compiler is provided
- by TcRnDriver.
- Entire modules are processed by calling tcRnModule and GHCi
- uses tcRnStmt, tcRnExpr, and
- tcRnType to typecheck statements and expressions, and to
- kind check types, respectively. Moreover, tcRnExtCore is
- provided to typecheck external Core code. Moreover,
- tcTopSrcDecls is used by Template Haskell - more
- specifically by TcSplice.tc_bracket
- - to type check the contents of declaration brackets.
-
-
-
Renaming and Type Checking a Module
-
- The function tcRnModule controls the complete static
- analysis of a Haskell module. It sets up the combined renamer and type
- checker monad, resolves all import statements, initiates the actual
- renaming and type checking process, and finally, wraps off by processing
- the export list.
-
-
- The actual type checking and renaming process is initiated via
- TcRnDriver.tcRnSrcDecls, which uses a helper called
- tc_rn_src_decls to implement the iterative renaming and
- type checking process required by Template
- Haskell. However, before it invokes tc_rn_src_decls,
- it takes care of hi-boot files; afterwards, it simplifies type
- constraints and zonking (see below regarding the latter).
-
-
- The function tc_rn_src_decls partitions static analysis of
- a whole module into multiple rounds, where the initial round is followed
- by an additional one for each toplevel splice. It collects all
- declarations up to the next splice into an HsDecl.HsGroup
- to rename and type check that declaration group by calling
- TcRnDriver.tcRnGroup. Afterwards, it executes the
- splice (if there are any left) and proceeds to the next group, which
- includes the declarations produced by the splice.
-
-
- The function tcRnGroup, finally, gets down to invoke the
- actual renaming and type checking via
- TcRnDriver.rnTopSrcDecls and
- TcRnDriver.tcTopSrcDecls, respectively. The renamer, apart
- from renaming, computes the global type checking environment, of type
- TcRnTypes.TcGblEnv, which is stored in the type checking
- monad before type checking commences.
-
-
-
Type Checking a Declaration Group
-
- The type checking of a declaration group, performed by
- tcTopSrcDecls starts by processing of the type and class
- declarations of the current module, using the function
- TcTyClsDecls.tcTyAndClassDecls. This is followed by a
- first round over instance declarations using
- TcInstDcls.tcInstDecls1, which in particular generates all
- additional bindings due to the deriving process. Then come foreign
- import declarations (TcForeign.tcForeignImports) and
- default declarations (TcDefaults.tcDefaults).
-
-
- Now, finally, toplevel value declarations (including derived ones) are
- type checked using TcBinds.tcTopBinds. Afterwards,
- TcInstDcls.tcInstDecls2 traverses instances for the second
- time. Type checking concludes with processing foreign exports
- (TcForeign.tcForeignExports) and rewrite rules
- (TcRules.tcRules). Finally, the global environment is
- extended with the new bindings.
-
-
-
Type checking Type and Class Declarations
-
- Type and class declarations are type checked in a couple of phases that
- contain recursive dependencies - aka knots. The first knot
- encompasses almost the whole type checking of these declarations and
- forms the main piece of TcTyClsDecls.tcTyAndClassDecls.
-
-
- Inside this big knot, the first main operation is kind checking, which
- again involves a knot. It is implemented by kcTyClDecls,
- which performs kind checking of potentially recursively-dependent type
- and class declarations using kind variables for initially unknown kinds.
- During processing the individual declarations some of these variables
- will be instantiated depending on the context; the rest gets by default
- kind * (during zonking of the kind signatures).
- Type synonyms are treated specially in this process, because they can
- have an unboxed type, but they cannot be recursive. Hence, their kinds
- are inferred in dependency order. Moreover, in contrast to class
- declarations and other type declarations, synonyms are not entered into
- the global environment as a global TyThing.
- (TypeRep.TyThing is a sum type that combines the various
- flavours of typish entities, such that they can be stuck into type
- environments and similar.)
-
-
-
More Details
-
-
Types Variables and Zonking
-
- During type checking type variables are represented by mutable variables
- - cf. the variable story. Consequently,
- unification can instantiate type variables by updating those mutable
- variables. This process of instantiation is (for reasons that elude me)
- called zonking
- in GHC's sources. The zonking routines for the various forms of Haskell
- constructs are responsible for most of the code in the module TcHsSyn,
- whereas the routines that actually operate on mutable types are defined
- in TcMType;
- this includes the zonking of type variables and type terms, routines to
- create mutable structures and update them as well as routines that check
- constraints, such as that type variables in function signatures have not
- been instantiated during type checking. The actual type unification
- routine is uTys in the module TcUnify.
-
-
- All type variables that may be instantiated (those in signatures
- may not), but haven't been instantiated during type checking, are zonked
- to (), so that after type checking all mutable variables
- have been eliminated.
-
-
-
Type Representation
-
- The representation of types is fixed in the module TcRep
- and exported as the data type Type. As explained in TcType,
- GHC supports rank-N types, but, in the type checker, maintains the
- restriction that type variables cannot be instantiated to quantified
- types (i.e., the type system is predicative). The type checker floats
- universal quantifiers outside and maintains types in prenex form.
- (However, quantifiers can, of course, not float out of negative
- positions.) Overall, we have
-
- where sigma is in prenex form; i.e., there is never a
- forall to the right of an arrow in a phi type. Moreover, a
- type of the form tau never contains a quantifier (which
- includes arguments to type constructors).
-
-
- Of particular interest are the variants SourceTy and
- NoteTy of TypeRep.Type.
- The constructor SourceTy :: SourceType -> Type represents a
- type constraint; that is, a predicate over types represented by a
- dictionary. The type checker treats a SourceTy as opaque,
- but during the translation to core it will be expanded into its concrete
- representation (i.e., a dictionary type) by the function Type.sourceTypeRep.
- Note that newtypes are not covered by SourceTypes anymore,
- even if some comments in GHC still suggest this. Instead, all newtype
- applications are initially represented as a NewTcApp, until
- they are eliminated by calls to Type.newTypeRep.
-
-
- The NoteTy constructor is used to add non-essential
- information to a type term. Such information has the type
- TypeRep.TyNote and is either the set of free type variables
- of the annotated expression or the unexpanded version of a type synonym.
- Free variables sets are cached as notes to save the overhead of
- repeatedly computing the same set for a given term. Unexpanded type
- synonyms are useful for generating comprehensible error messages, but
- have no influence on the process of type checking.
-
-
-
Type Checking Environment
-
- During type checking, GHC maintains a type environment whose
- type definitions are fixed in the module TcRnTypes with the operations defined in
-TcEnv.
- Among other things, the environment contains all imported and local
- instances as well as a list of global entities (imported and
- local types and classes together with imported identifiers) and
- local entities (locally defined identifiers). This environment
- is threaded through the type checking monad, whose support functions
- including initialisation can be found in the module TcRnMonad.
-
-
- Usage occurrences of identifiers are processed by the function
- tcId whose main purpose is to instantiate
- overloaded identifiers. It essentially calls
- TcInst.instOverloadedFun once for each universally
- quantified set of type constraints. It should be noted that overloaded
- identifiers are replaced by new names that are first defined in the LIE
- (Local Instance Environment?) and later promoted into top-level
- bindings.
-
-
- GHC implements overloading using so-called dictionaries. A
- dictionary is a tuple of functions -- one function for each method in
- the class of which the dictionary implements an instance. During type
- checking, GHC replaces each type constraint of a function with one
- additional argument. At runtime, the extended function gets passed a
- matching class dictionary by way of these additional arguments.
- Whenever the function needs to call a method of such a class, it simply
- extracts it from the dictionary.
-
- This sounds simple enough; however, the actual implementation is a bit
- more tricky as it wants to keep track of all the instances at which
- overloaded functions are used in a module. This information is useful
- to optimise the code. The implementation is the module Inst.lhs.
-
- The function instOverloadedFun is invoked for each
- overloaded usage occurrence of an identifier, where overloaded means that
- the type of the identifier contains a non-trivial type constraint. It
- proceeds in two steps: (1) Allocation of a method instance
- (newMethodWithGivenTy) and (2) instantiation of functional
- dependencies. The former implies allocating a new unique identifier,
- which replaces the original (overloaded) identifier at the currently
- type-checked usage occurrence.
-
- The new identifier (after being threaded through the LIE) eventually
- will be bound by a top-level binding whose rhs contains a partial
- application of the original overloaded identifier. This papp applies
- the overloaded function to the dictionaries needed for the current
- instance. In GHC lingo, this is called a method. Before
- becoming a top-level binding, the method is first represented as a value
- of type Inst.Inst, which makes it easy to fold multiple
- instances of the same identifier at the same types into one global
- definition. (And probably other things, too, which I haven't
- investigated yet.)
-
-
- Note: As of 13 January 2001 (wrt. to the code in the
- CVS HEAD), the above mechanism interferes badly with RULES pragmas
- defined over overloaded functions. During instantiation, a new name is
- created for an overloaded function partially applied to the dictionaries
- needed in a usage position of that function. As the rewrite rule,
- however, mentions the original overloaded name, it won't fire anymore
- -- unless later phases remove the intermediate definition again. The
- latest CVS version of GHC has an option
- -fno-method-sharing, which avoids sharing instantiation
- stubs. This is usually/often/sometimes sufficient to make the rules
- fire again.
-
-
- GHC essentially supports two type systems: (1) the source type
- system (which is a heavily extended version of the type system of
- Haskell 98) and (2) the Core type system, which is the type system
- used by the intermediate language (see also Sugar Free: From Haskell To Core).
-
-
- During parsing and renaming, type information is represented in a form
- that is very close to Haskell's concrete syntax; it is defined by
- HsTypes.HsType. In addition, type, class, and instance
- declarations are maintained in their source form as defined in the
- module HsDecl. The situation changes during type checking,
- where types are translated into a second representation, which is
- defined in the module types/TypeRep.lhs, as type
- Type. This second representation is peculiar in that it is
- a hybrid between the source representation of types and the Core
- representation of types. Using functions, such as
- Type.coreView and Type.deepCoreView, a value
- of type Type exhibits its Core representation. On the
- other hand, pretty printing a Type with
- TypeRep.pprType yields the type's source representation.
-
-
- In fact, the type checker maintains type
- environments based on Type, but needs to perform type
- checking on source-level types. As a result, we have functions
- Type.tcEqType and Type.tcCmpType, which
- compare types based on their source representation, as well as the
- function coreEqType, which compares them based on their
- core representation. The latter is needed during type checking of Core
- (as performed by the functions in the module
- coreSyn/CoreLint.lhs).
-
-
-
Type Synonyms
-
- Type synonyms in Haskell are essentially a form of macro definitions on
- the type level. For example, when the type checker compares two type
- terms, synonyms are always compared in their expanded form. However, to
- produce good error messages, we like to avoid expanding type synonyms
- during pretty printing. Hence, Type has a variant
- NoteTy TyNote Type, where
-
-
-
-data TyNote
- = FTVNote TyVarSet -- The free type variables of the noted expression
-
- | SynNote Type -- Used for type synonyms
- -- The Type is always a TyConApp, and is the un-expanded form.
- -- The type to which the note is attached is the expanded form.
-
-
- In other words, a NoteTy represents the expanded form of a
- type synonym together with a note stating its source form.
-
-
-
Creating Representation Types of Synonyms
-
- During translation from HsType to Type the
- function Type.mkSynTy is used to construct representations
- of applications of type synonyms. It creates a NoteTy node
- if the synonym is applied to a sufficient number of arguments;
- otherwise, it builds a simple TyConApp and leaves it to
- TcMType.checkValidType to pick up invalid unsaturated
- synonym applications. While creating a NoteTy,
- mkSynTy also expands the synonym by substituting the type
- arguments for the parameters of the synonym definition, using
- Type.substTyWith.
-
-
- The function mkSynTy is used indirectly via
- mkGenTyConApp, mkAppTy, and
- mkAppTy, which construct type representations involving
- type applications. The function mkSynTy is also used
- directly during type checking interface files; this is for tedious
- reasons to do with forall hoisting - see the comment at
- TcIface.mkIfTcApp.
-
-
-
Newtypes
-
- Data types declared by a newtype declarations constitute new
- type constructors---i.e., they are not just type macros, but introduce
- new type names. However, provided that a newtype is not recursive, we
- still want to implement it by its representation type. GHC realises this
- by providing two flavours of type equality: (1) tcEqType is
- source-level type equality, which compares newtypes and
- PredTypes by name, and (2) coreEqType compares
- them structurally (by using deepCoreView to expand the
- representation before comparing). The function
- deepCoreView (via coreView) invokes
- expandNewTcApp for every type constructor application
- (TyConApp) to determine whether we are looking at a newtype
- application that needs to be expanded to its representation type.
-
-
-
Predicates
-
- The dictionary translation of type classes, translates each predicate in
- a type context of a type signature into an additional argument, which
- carries a dictionary with the functions overloaded by the corresponding
- class. The Type data type has a special variant
- PredTy PredType for predicates, where
-
-
-
-data PredType
- = ClassP Class [Type] -- Class predicate
- | IParam (IPName Name) Type -- Implicit parameter
-
-
- These types need to be handled as source type during type checking, but
- turn into their representations when inspected through
- coreView. The representation is determined by
- Type.predTypeRep.
-
-
-
Representation of Type Constructors
-
- Type constructor applications are represented in Type by
- the variant TyConApp :: TyCon -> [Type] -> Type. The first
- argument to TyConApp, namely TyCon.TyCon,
- distinguishes between function type constructors (variant
- FunTyCon) and algebraic type constructors (variant
- AlgTyCon), which arise from data and newtype declarations.
- The variant AlgTyCon contains all the information available
- from the data/newtype declaration as well as derived information, such
- as the Unique and argument variance information. This
- includes a field algTcRhs :: AlgTyConRhs, where
- AlgTyConRhs distinguishes three kinds of algebraic data
- type declarations: (1) declarations that have been exported abstractly,
- (2) data declarations, and (3) newtype
- declarations. The last two both include their original right hand side;
- in addition, the third variant also caches the "ultimate" representation
- type, which is the right hand side after expanding all type synonyms and
- non-recursive newtypes.
-
-
- Both data and newtype declarations refer to their data constructors
- represented as DataCon.DataCon, which include all details
- of their signature (as derived from the original declaration) as well
- information for code generation, such as their tag value.
-
-
-
Representation of Classes and Instances
-
- Class declarations turn into values of type Class.Class.
- They represent methods as the Ids of the dictionary
- selector functions. Similar selector functions are available for
- superclass dictionaries.
-
-
- Instance declarations turn into values of type
- InstEnv.Instance, which in interface files are represented
- as IfaceSyn.IfaceInst. Moreover, the type
- InstEnv.InstEnv, which is a synonym for UniqFM
- ClsInstEnv, provides a mapping of classes to their
- instances---ClsInstEnv is essentially a list of instance
- declarations.
-
-
-
-
-Last modified: Sun Jun 19 13:07:22 EST 2005
-
-
-
-
diff --git a/docs/comm/the-beast/vars.html b/docs/comm/the-beast/vars.html
deleted file mode 100644
index 9bbd310c605a..000000000000
--- a/docs/comm/the-beast/vars.html
+++ /dev/null
@@ -1,235 +0,0 @@
-
-
-
-
- The GHC Commentary - The Real Story about Variables, Ids, TyVars, and the like
-
-
-
-
The GHC Commentary - The Real Story about Variables, Ids, TyVars, and the like
-
-
-
-
Variables
-
-The Var type, defined in basicTypes/Var.lhs,
-represents variables, both term variables and type variables:
-
- data Var
- = Var {
- varName :: Name,
- realUnique :: FastInt,
- varType :: Type,
- varDetails :: VarDetails,
- varInfo :: IdInfo
- }
-
-
-
The varName field contains the identity of the variable:
-its unique number, and its print-name. See "The truth about names".
-
-
The realUnique field caches the unique number in the
-varName field, just to make comparison of Vars a little faster.
-
-
The varType field gives the type of a term variable, or the kind of a
-type variable. (Types and kinds are both represented by a Type.)
-
-
The varDetails field distinguishes term variables from type variables,
-and makes some further distinctions (see below).
-
-
For term variables (only) the varInfo field contains lots of useful
-information: strictness, unfolding, etc. However, this information is all optional;
-you can always throw away the IdInfo. In contrast, you can't safely throw away
-the VarDetails of a Var
-
-
-It's often fantastically convenient to have term variables and type variables
-share a single data type. For example,
-
- exprFreeVars :: CoreExpr -> VarSet
-
-If there were two types, we'd need to return two sets. Simiarly, big lambdas and
-little lambdas use the same constructor in Core, which is extremely convenient.
-
-We define a couple of type synonyms:
-
- type Id = Var -- Term variables
- type TyVar = Var -- Type variables
-
-just to help us document the occasions when we are expecting only term variables,
-or only type variables.
-
-
-
The VarDetails field
-
-The VarDetails field tells what kind of variable this is:
-
-data VarDetails
- = LocalId -- Used for locally-defined Ids (see NOTE below)
- LocalIdDetails
-
- | GlobalId -- Used for imported Ids, dict selectors etc
- GlobalIdDetails
-
- | TyVar
- | MutTyVar (IORef (Maybe Type)) -- Used during unification;
- TyVarDetails
-
-The TyVar case is self-explanatory. The MutTyVar
-case is used only during type checking. Then a type variable can be unified,
-using an imperative update, with a type, and that is what the
-IORef is for. The TcType.TyVarDetails field records
-the sort of type variable we are dealing with. It is defined as
-
-SigTv marks type variables that were introduced when
-instantiating a type signature prior to matching it against the inferred type
-of a definition. The variants ClsTv and InstTv mark
-scoped type variables introduced by class and instance heads, respectively.
-These first three sorts of type variables are skolem variables (tested by the
-predicate isSkolemTyVar); i.e., they must not be
-instantiated. All other type variables are marked as VanillaTv.
-
-For a long time I tried to keep mutable Vars statically type-distinct
-from immutable Vars, but I've finally given up. It's just too painful.
-After type checking there are no MutTyVars left, but there's no static check
-of that fact.
-
-
Term variables (Id)
-
-A term variable (of type Id) is represented either by a
-LocalId or a GlobalId:
-
-A GlobalId is
-
-
Always bound at top-level.
-
Always has a GlobalName, and hence has
- a Unique that is globally unique across the whole
- GHC invocation (a single invocation may compile multiple modules).
-
Has IdInfo that is absolutely fixed, forever.
-
-
-
-A LocalId is:
-
-
Always bound in the module being compiled:
-
-
either bound within an expression (lambda, case, local let(rec))
-
or defined at top level in the module being compiled.
-
-
Has IdInfo that changes as the simpifier bashes repeatedly on it.
-
-
-The key thing about LocalIds is that the free-variable finder
-typically treats them as candidate free variables. That is, it ignores
-GlobalIds such as imported constants, data contructors, etc.
-
-An important invariant is this: All the bindings in the module
-being compiled (whether top level or not) are LocalIds
-until the CoreTidy phase. In the CoreTidy phase, all
-externally-visible top-level bindings are made into GlobalIds. This
-is the point when a LocalId becomes "frozen" and becomes
-a fixed, immutable GlobalId.
-
-(A binding is "externally-visible" if it is exported, or
-mentioned in the unfolding of an externally-visible Id. An
-externally-visible Id may not have an unfolding, either because it is
-too big, or because it is the loop-breaker of a recursive group.)
-
-
Global Ids and implicit Ids
-
-GlobalIds are further categorised by their GlobalIdDetails.
-This type is defined in basicTypes/IdInfo, because it mentions other
-structured types like DataCon. Unfortunately it is *used* in Var.lhs
-so there's a hi-boot knot to get it there. Anyway, here's the declaration:
-
-data GlobalIdDetails
- = NotGlobalId -- Used as a convenient extra return value
- -- from globalIdDetails
-
- | VanillaGlobal -- Imported from elsewhere
-
- | PrimOpId PrimOp -- The Id for a primitive operator
- | FCallId ForeignCall -- The Id for a foreign call
-
- -- These next ones are all "implicit Ids"
- | RecordSelId FieldLabel -- The Id for a record selector
- | DataConId DataCon -- The Id for a data constructor *worker*
- | DataConWrapId DataCon -- The Id for a data constructor *wrapper*
- -- [the only reasons we need to know is so that
- -- a) we can suppress printing a definition in the interface file
- -- b) when typechecking a pattern we can get from the
- -- Id back to the data con]
-
-The GlobalIdDetails allows us to go from the Id for
-a record selector, say, to its field name; or the Id for a primitive
-operator to the PrimOp itself.
-
-Certain GlobalIds are called "implicit" Ids. An implicit
-Id is derived by implication from some other declaration. So a record selector is
-derived from its data type declaration, for example. An implicit Ids is always
-a GlobalId. For most of the compilation, the implicit Ids are just
-that: implicit. If you do -ddump-simpl you won't see their definition. (That's
-why it's true to say that until CoreTidy all Ids in this compilation unit are
-LocalIds.) But at CorePrep, a binding is added for each implicit Id defined in
-this module, so that the code generator will generate code for the (curried) function.
-
-Implicit Ids carry their unfolding inside them, of course, so they may well have
-been inlined much earlier; but we generate the curried top-level defn just in
-case its ever needed.
-
-
LocalIds
-
-The LocalIdDetails gives more info about a LocalId:
-
-data LocalIdDetails
- = NotExported -- Not exported
- | Exported -- Exported
- | SpecPragma -- Not exported, but not to be discarded either
- -- It's unclean that this is so deeply built in
-
-From this we can tell whether the LocalId is exported, and that
-tells us whether we can drop an unused binding as dead code.
-
-The SpecPragma thing is a HACK. Suppose you write a SPECIALIZE pragma:
-
- foo :: Num a => a -> a
- {-# SPECIALIZE foo :: Int -> Int #-}
- foo = ...
-
-The type checker generates a dummy call to foo at the right types:
-
- $dummy = foo Int dNumInt
-
-The Id $dummy is marked SpecPragma. Its role is to hang
-onto that call to foo so that the specialiser can see it, but there
-are no calls to $dummy.
-The simplifier is careful not to discard SpecPragma Ids, so that it
-reaches the specialiser. The specialiser processes the right hand side of a SpecPragma Id
-to find calls to overloaded functions, and then discards the SpecPragma Id.
-So SpecPragma behaves a like Exported, at least until the specialiser.
-
-
-
ExternalNames and InternalNames
-
-Notice that whether an Id is a LocalId or GlobalId is
-not the same as whether the Id has an ExternaName or an InternalName
-(see "The truth about Names"):
-
-
Every GlobalId has an ExternalName.
-
A LocalId might have either kind of Name.
-
-
-
-Last modified: Fri Sep 12 15:17:18 BST 2003
-
-
-
-
-
diff --git a/docs/core-spec/core-spec.mng b/docs/core-spec/core-spec.mng
index 4e134e08541a..28540ac8e9e9 100644
--- a/docs/core-spec/core-spec.mng
+++ b/docs/core-spec/core-spec.mng
@@ -75,7 +75,9 @@ We also leave abstract the function \coderef{basicTypes/Literal.lhs}{literalType
and the judgment \coderef{coreSyn/CoreLint.lhs}{lintTyLit} (written $[[G |-tylit lit : k]]$).
\subsection{Variables}
-
+\enlargethispage{10pt} % without this first line of "z" definition is placed on
+ % second page and it becomes the only line of text on that
+ % page, resulting in whole page being empty.
GHC uses the same datatype to represent term-level variables and type-level
variables:
diff --git a/docs/core-spec/core-spec.pdf b/docs/core-spec/core-spec.pdf
index b9a9f4ba12f5..5d9f29c58b07 100644
Binary files a/docs/core-spec/core-spec.pdf and b/docs/core-spec/core-spec.pdf differ
diff --git a/docs/docbook-cheat-sheet/Makefile b/docs/docbook-cheat-sheet/Makefile
deleted file mode 100644
index 8cd9f518698e..000000000000
--- a/docs/docbook-cheat-sheet/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-TOP = ../..
-include $(TOP)/mk/boilerplate.mk
-
-XML_DOC = docbook-cheat-sheet
-INSTALL_XML_DOC = docbook-cheat-sheet
-
-include $(TOP)/mk/bindist.mk
-
-include $(TOP)/mk/target.mk
diff --git a/docs/docbook-cheat-sheet/docbook-cheat-sheet.xml b/docs/docbook-cheat-sheet/docbook-cheat-sheet.xml
deleted file mode 100644
index d48b3ef6f212..000000000000
--- a/docs/docbook-cheat-sheet/docbook-cheat-sheet.xml
+++ /dev/null
@@ -1,223 +0,0 @@
-
-
-
-
-
-
- Using DocBook to write GHC documentation
- The GHC Team
- glasgow-haskell-{users,bugs}@dcs.gla.ac.uk
- January 2000
-
-
-
- Getting the DocBook tools
- See the installation guide.
-
-
-
- Document layout
-
- The GHC documentation is written using DocBook XML V4.5, so
- the first few lines should look like this:
-
-
-<?xml version="1.0" encoding="iso-8859-1"?>
-<!DOCTYPE article PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
- "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
-
-
- The encoding can of course be chosen according to taste.
-
- This guide is not meant to teach you
- how to write DocBook; read the DocBook book for that. It is
- more of a reference than a tutorial, so see the DocBook home page
- for other links.
-
- However, by popular demand, here are some useful points:
-
-
-
-
- Remember to use para
- inside listitems.
-
-
-
- The rest of this section outlines the use of several tags
- which may not be obvious (DocBook is rather scholastic in style:
- it has tags for many things from C function prototypes to keyboard
- bindings; at the same time it has many omissions and
- oddities). The current scheme has many infelicities, partly
- because it was dreamt up in a hurry while the author was learning
- DocBook and converting the documentation thereto, and partly
- because DocBook is rather C-centric.
-
-
-
-
- Comments
-
- Comments in XML look like this: This is a comment.
-
-
-
-
- command
-
- Used for commands typed into interactive sessions
- (e.g. cp foo bar and the names of
- programs such as gmake.
-
-
-
-
- constant
-
- Used for system constants such as
- U_MAXINT and
- Makefile variables like
- SRC_FILES (because they are usually
- constant for a given run of make, and
- hence have a constant feel to them).
-
-
-
-
- email
-
- For email addresses. This is a tag that's easy to
- overlook if you don't know it's there.
-
-
-
-
- filename
-
- Used for paths, filenames, file extensions.
-
-
-
-
- function
-
- Used for functions and constructors.
-
-
-
-
- indexterm
-
- The normal way to mark up an index term is
- <indexterm><primary>term</primary></indexterm>.
-
-
-
-
- keycap
- keycombo
-
- Some more tags you may miss. Used for combinations
- such as
- ControlD.
-
-
-
-
- literal
-
- Used for everything that should appear in typewriter
- font that has no other obvious tag: types, monads, small
- snippets of program text that are formatted inline, and the
- like.
-
-
-
-
- option
-
- Used for compiler options and similar.
-
-
-
-
- programlisting
-
- For displayed program listings (including shell
- scripts).
-
-
-
-
- screen
-
- For displayed screen dumps, such as portions of shell
- interaction. It's easy to tell the difference between these
- and shell scripts: the latter lack a shell prompt.
-
-
-
-
- varname
-
- Used for variables, but not type variables.
-
-
-
-
-
-
-
-
- Tables
-
- Tables are quite complicated to write in DocBook XML (as in HTML,
- there are lots of fiddly tags), so here's an example you can
- cannibalise. In the spirit of the LaTeX short introduction I don't
- repeat all the markup verbatim; you have to look at the source for
- that.
-
-
-
-
-
-
-
-
-
- Here's
- a sample
- table
-
-
-
- With differently
- aligned
- cells
-
-
-
-
- There's not much else to it. Entries can span
- both extra rows and extra columns; just be careful when
- using block markup (such as paras) within an entry that there is no space
- between the open and close entry tags and the adjacent
- text, as otherwise you will suffer from Pernicious
- Mixed Content (the parser will think you're
- using inline markup).
-
-
-
-
-
-
-
-
diff --git a/docs/storage-mgt/rp.tex b/docs/storage-mgt/rp.tex
index 20e313ba4316..0d841b9d42ec 100644
--- a/docs/storage-mgt/rp.tex
+++ b/docs/storage-mgt/rp.tex
@@ -1029,7 +1029,7 @@ \section{Usage}
\label{fig-cacheprof}
\end{figure}
-\section{Comparision with nhc}
+\section{Comparison with nhc}
\section{Files}
diff --git a/docs/users_guide/7.10.1-notes.xml b/docs/users_guide/7.10.1-notes.xml
new file mode 100644
index 000000000000..0af4c3145b8e
--- /dev/null
+++ b/docs/users_guide/7.10.1-notes.xml
@@ -0,0 +1,411 @@
+
+
+ Release notes for version 7.10.1
+
+
+ The significant changes to the various parts of the compiler are listed
+ in the following sections. There have also been numerous bug fixes and
+ performance improvements over the 7.8 branch.
+
+
+
+ Highlights
+
+
+ The highlights, since the 7.8 branch, are:
+
+
+
+
+
+ TODO FIXME
+
+
+
+
+
+
+ Full details
+
+ Language
+
+
+
+ Added support for binary integer literals
+
+
+
+
+
+
+ Compiler
+
+
+
+ GHC now checks that all the language extensions required for
+ the inferred type signatures are explicitly enabled. This
+ means that if any of the type signatures inferred in your
+ program requires some language extension you will need to
+ enable it. The motivation is that adding a missing type
+ signature inferred by GHC should yield a program that
+ typechecks. Previously this was not the case.
+
+
+ This is a breaking change. Code that used to compile in the
+ past might fail with an error message requiring some
+ particular language extension (most likely
+ , or
+ ).
+
+
+
+
+
+
+ GHCi
+
+
+
+ TODO FIXME
+
+
+
+
+
+
+ Template Haskell
+
+
+
+ TODO FIXME
+
+
+
+
+
+
+ Runtime system
+
+
+
+ TODO FIXME
+
+
+
+
+
+
+ Build system
+
+
+
+ ghc-pkg now respects
+ and when modifying packages (e.g.
+ changing exposed/trust flag or unregistering). Previously,
+ ghc-pkg would ignore these flags and modify
+ whichever package it found first on the database stack. To
+ recover the old behavior, simply omit these flags.
+
+
+
+
+ ghc-pkg accepts a
+ flag which allows a user to override the location of the user package
+ database. Unlike databases specified using ,
+ a user package database configured this way respects
+ the flag.
+
+
+
+
+
+
+
+ Libraries
+
+
+ array
+
+
+
+ Version number XXXXX (was 0.5.0.0)
+
+
+
+
+
+
+ base
+
+
+
+ Version number XXXXX (was 4.7.0.0)
+
+
+
+
+
+
+ bin-package-db
+
+
+
+ This is an internal package, and should not be used.
+
+
+
+
+
+
+ binary
+
+
+
+ Version number XXXXX (was 0.7.1.0)
+
+
+
+
+
+
+ bytestring
+
+
+
+ Version number XXXXX (was 0.10.4.0)
+
+
+
+
+
+
+ Cabal
+
+
+
+ Version number XXXXX (was 1.18.1.3)
+
+
+
+
+
+
+ containers
+
+
+
+ Version number XXXXX (was 0.5.4.0)
+
+
+
+
+
+
+ deepseq
+
+
+
+ Version number XXXXX (was 1.3.0.2)
+
+
+
+
+
+
+ directory
+
+
+
+ Version number XXXXX (was 1.2.0.2)
+
+
+
+
+
+
+ filepath
+
+
+
+ Version number XXXXX (was 1.3.0.2)
+
+
+
+
+
+
+ ghc
+
+
+
+ Many internal functions in GHC related to package IDs have been
+ renamed to refer to package keys, e.g. PackageId
+ is now PackageKey, the wired-in names
+ such as primPackageId are now
+ primPackageKey, etc. This reflects a distinction
+ that we are now making: a package ID is, as before, the user-visible
+ ID from Cabal foo-1.0; a package key is now
+ a compiler-internal entity used for generating linking symbols, and
+ may not correspond at all to the package ID. In
+ particular, there may be multiple package keys per
+ package ID.
+
+
+
+
+
+
+ ghc-prim
+
+
+
+ Version number XXXXX (was 0.3.1.0)
+
+
+
+
+
+
+ haskell98
+
+
+
+ Version number XXXXX (was 2.0.0.3)
+
+
+
+
+
+
+ haskell2010
+
+
+
+ Version number XXXXX (was 1.1.1.1)
+
+
+
+
+
+
+ hoopl
+
+
+
+ Version number XXXXX (was 3.10.0.0)
+
+
+
+
+
+
+ hpc
+
+
+
+ Version number XXXXX (was 0.6.0.1)
+
+
+
+
+
+
+ integer-gmp
+
+
+
+ Version number XXXXX (was 0.5.1.0)
+
+
+
+
+
+
+ old-locale
+
+
+
+ Version number XXXXX (was 1.0.0.6)
+
+
+
+
+
+
+ old-time
+
+
+
+ Version number XXXXX (was 1.1.0.2)
+
+
+
+
+
+
+ process
+
+
+
+ Version number XXXXX (was 1.2.0.0)
+
+
+
+
+
+
+ template-haskell
+
+
+
+ Version number XXXXX (was 2.9.0.0)
+
+
+
+
+
+
+ time
+
+
+
+ Version number XXXXX (was 1.4.1)
+
+
+
+
+
+
+ unix
+
+
+
+ Version number XXXXX (was 2.7.0.0)
+
+
+
+
+
+
+ Win32
+
+
+
+ Version number XXXXX (was 2.3.0.1)
+
+
+
+
+
+
+
+ Known bugs
+
+
+
+ TODO FIXME
+
+
+
+
+
diff --git a/docs/users_guide/7.8.1-notes.xml b/docs/users_guide/7.8.1-notes.xml
deleted file mode 100644
index 1ae96bc80ed0..000000000000
--- a/docs/users_guide/7.8.1-notes.xml
+++ /dev/null
@@ -1,1206 +0,0 @@
-
-
- Release notes for version 7.8.1
-
-
- The significant changes to the various parts of the compiler are listed
- in the following sections. There have also been numerous bug fixes and
- performance improvements over the 7.6 branch.
-
-
-
- Highlights
-
-
- The highlights, since the 7.6 branch, are:
-
-
-
-
-
- OS X Mavericks with XCode 5 is now properly supported
- by GHC. As a result of this, GHC now uses Clang to
- preprocess Haskell code by default for Mavericks
- builds.
-
-
-
- Note that normally, GHC used gcc as
- the preprocessor for Haskell code (as it was the
- default everywhere,) which implements
- -traditional behavior. However,
- Clang is not 100% compatible with GCC's
- -traditional as it is rather
- implementation specified and does not match any
- specification. Clang is also more strict.
-
-
-
- As a result of this, when using Clang as the
- preprocessor, some programs which previously used
- -XCPP and the preprocessor will now
- fail to compile. Users who wish to retain the previous
- behavior are better off using cpphs as an external
- preprocessor for the time being.
-
-
-
- In the future, we hope to fix this by adopting a
- better preprocessor implementation independent of the
- C compiler (perhaps cpphs itself,) and ship that
- instead.
-
-
-
-
-
- By default, GHC has a new warning enabled,
- -fwarn-typed-holes, which causes the
- compiler to respond with the types of unbound
- variables it encounters in the source code. (It is
- reminiscient of the "holes" feature in languages such
- as Agda.)
-
- For more information, see .
-
-
-
-
-
- GHC can now perform simple evaluation of type-level
- natural numbers, when using the
- DataKinds extension. For example,
- given a type-level constraint such as (x + 3)
- ~ 5, GHC is able to infer that
- x is 2. Similarly, GHC can now
- understand type-level identities such as x +
- 0 ~ x.
-
-
-
- Note that the solving of these equations is only used
- to resolve unification variables - it does not
- generate new facts in the type checker. This is
- similar to how functional dependencies work.
-
-
-
-
-
- It is now possible to declare a 'closed' type
- family when using the
- TypeFamilies extension. A closed
- type family cannot have any
- instances created other than the ones in its
- definition.
-
- For more information, see .
-
-
-
-
-
- Use of the GeneralizedNewtypeDeriving
- extension is now subject to role checking,
- to ensure type safety of the derived instances. As this change
- increases the type safety of GHC, it is possible that some code
- that previously compiled will no longer work.
-
- For more information, see .
-
-
-
-
-
- GHC now supports overloading list literals using the new
- OverloadedLists extension.
-
- For more information, see .
-
-
-
-
-
- GHC now supports pattern synonyms, enabled by the
- -XPatternSynonyms extension,
- allowing you to name and abstract over patterns more
- easily.
-
- For more information, see .
-
-
- Note: For the GHC 7.8.1 version, this language feature
- should be regarded as a preview.
-
-
-
-
-
- There has been significant overhaul of the type
- inference engine and constraint solver, meaning it
- should be faster and use less memory.
-
-
-
-
-
- By default, GHC will now unbox all "small" strict
- fields in a data type. A "small" data type is one
- whose size is equivalent to or smaller than the native
- word size of the machine. This means you no longer
- have to specify UNPACK pragmas for
- e.g. strict Int fields. This also
- applies to floating-point values.
-
-
-
-
-
- GHC now has a brand-new I/O manager that scales significantly
- better for larger workloads compared to the previous one. It
- should scale linearly up to approximately 32 cores.
-
-
-
-
-
- The LLVM backend now supports 128- and 256-bit SIMD
- operations.
-
-
- Note carefully: this is only available with
- the LLVM backend, and should be considered
- experimental.
-
-
-
-
-
- The new code generator, after significant work by many
- individuals over the past several years, is now enabled by
- default. This is a complete rewrite of the STG to Cmm
- transformation. In general, your programs may get slightly
- faster.
-
-
-
- The old code generator has been removed completely.
-
-
-
-
-
- GHC now has substantially better support for cross
- compilation. In particular, GHC now has all the
- necessary patches to support cross compilation to
- Apple iOS, using the LLVM backend.
-
-
-
-
-
- PrimOps for comparing unboxed values now return
- Int# instead of Bool.
- This change is backwards incompatible. See
-
- this GHC wiki page for instructions how to update your
- existing code. See
- here for motivation and discussion of implementation details.
-
-
-
-
-
- New PrimOps for atomic memory operations.
- The casMutVar# PrimOp was introduced in
- GHC 7.2 (debugged in 7.4). This release also includes additional
- PrimOps for compare-and-swap (casArray# and
- casIntArray#) and one for fetch-and-add
- (fetchAddIntArray#).
-
-
-
-
-
- On Linux, FreeBSD and Mac OS X, GHCi now uses the
- system dynamic linker by default, instead of its built
- in (static) object linker. This is more robust
- cross-platform, and fixes many long-standing bugs (for
- example: constructors and destructors, weak symbols,
- etc work correctly, and several edge cases in the RTS
- are fixed.)
-
-
-
- As a result of this, GHCi (and Template Haskell) must
- now load dynamic object files, not static
- ones. To assist this, there is a new compilation flag,
- -dynamic-too, which when used
- during compilation causes GHC to emit both static and
- dynamic object files at the same time. GHC itself
- still defaults to static linking.
-
-
-
- Note that Cabal will correctly handle
- -dynamic-too for you automatically,
- especially when -XTemplateHaskell
- is needed - but you must tell Cabal you are
- using the TemplateHaskell
- extension.
-
-
-
- Currently, Dynamic GHCi and
- -dynamic-too are not supported on
- Windows (32bit or 64bit.)
-
-
-
-
-
- Typeable is now poly-kinded, making
- Typeable1, Typeable2,
- etc., obsolete, deprecated, and relegated to
- Data.OldTypeable. Furthermore, user-written
- instances of Typeable are now disallowed:
- use deriving or the new extension
- -XAutoDeriveTypeable, which will create
- Typeable instances for every datatype
- declared in the module.
-
-
-
-
-
- GHC now has a parallel compilation driver. When
- compiling with --make (which is on
- by default,) you may also specify
- -jN in order to compile
- N modules in
- parallel. (Note: this will automatically scale on
- multicore machines without specifying +RTS
- -N to the compiler.)
-
-
-
-
-
- GHC now has support for a new pragma,
- {-# MINIMAL #-}, allowing you to
- explicitly declare the minimal complete definition of
- a class. Should an instance not provide the minimal
- required definitions, a warning will be emitted.
-
-
-
-
-
- In GHC 7.10, Applicative will
- become a superclass of Monad,
- potentially breaking a lot of user code. To ease this
- transition, GHC now generates warnings when
- definitions conflict with the Applicative-Monad
- Proposal (AMP).
-
-
-
- A warning is emitted if a type is an instance of
- Monad but not of
- Applicative,
- MonadPlus but not
- Alternative, and when a local
- function named join,
- <*> or pure is
- defined.
-
-
-
- The warnings are enabled by default, and can be controlled
- using the new flag -f[no-]warn-amp.
-
-
-
-
-
- Using the new InterruptibleFFI
- extension, it's possible to now declare a foreign
- import as interruptible, as opposed
- to only safe or
- unsafe. An
- interruptible foreign call is the
- same as a safe call, but may be
- interrupted by asynchronous Haskell
- exceptions, such as those generated by
- throwTo or
- timeout.
-
-
-
- For more information (including the exact details on
- how the foreign thread is interrupted,) see .
-
-
-
-
-
- GHC's internal compiler pipeline is now exposed
- through a Hooks module inside the
- GHC API. These hooks allow you to control most of the
- internal compiler phase machinery, including compiling
- expressions, phase control, and linking.
-
-
-
- Note: this interface will likely see continuous
- refinement and API changes in future releases, so it
- should be considered a preview.
-
-
-
-
-
-
- Full details
-
- Language
-
-
-
- There is a new extension,
- NullaryTypeClasses, which
- allows you to declare a type class without any
- parameters.
-
-
-
-
-
-
-
- There is a new extension,
- NumDecimals, which allows you
- to specify an integer using compact "floating
- literal" syntax. This lets you say things like
- 1.2e6 :: Integer instead of
- 1200000
-
-
-
-
-
-
-
- There is a new extension,
- NegativeLiterals, which will
- cause GHC to interpret the expression
- -123 as fromIntegral
- (-123). Haskell 98 and Haskell 2010 both
- specify that it should instead desugar to
- negate (fromIntegral 123)
-
-
-
-
-
-
-
- There is a new extension,
- EmptyCase, which allows
- to write a case expression with no alternatives
- case ... of {}.
-
-
-
-
-
-
-
- The IncoherentInstances
- extension has seen a behavioral change, and is
- now 'liberated' and less conservative during
- instance resolution. This allows more programs to
- compile than before.
-
-
- Now, IncoherentInstances will
- always pick an arbitrary matching instance, if
- multiple ones exist.
-
-
-
-
-
-
-
- A new built-in function coerce is
- provided that allows to safely coerce values between types
- that have the same run-time-presentation, such as
- newtypes, but also newtypes inside containers. See the
- haddock documentation of
- coerce
- and of the class
- Coercible
- for more details.
-
-
- This feature is included in this release as a technology
- preview, and may change its syntax and/or semantics in the
- next release.
-
-
-
-
-
-
-
- The new pragma, {-# MINIMAL #-},
- allows to explicitly declare the minimal complete
- definition of a class. Should an instance not provide
- the minimal required definitions, a warning will be
- emitted.
-
-
-
- See for more details.
-
-
-
-
-
-
- Compiler
-
-
-
- GHC can now build both static and dynamic object
- files at the same time in a single compilation
- pass, when given the
- -dynamic-too flag. This will
- produce both a statically-linkable
- .o object file, and a
- dynamically-linkable .dyn_o
- file. The output suffix of the dynamic objects can
- be controlled by the flag
- -dynosuf.
-
-
-
- Note that GHC still builds statically by default.
-
-
-
-
- GHC now supports a
- --show-options flag, which will
- dump all of the flags it supports to standard out.
-
-
-
-
- GHC now supports warning about overflow of integer
- literals, enabled by
- -fwarn-overflowed-literals. It
- is enabled by default.
-
-
-
-
- It's now possible to switch the system linker on Linux
- (between GNU gold and GNU ld) at runtime without problem.
-
-
-
-
- The -fwarn-dodgy-imports flag now warns
- in the case an import statement hides an
- entity which is not exported.
-
-
-
-
- The LLVM backend was overhauled and rewritten, and
- should hopefully be easier to maintain and work on
- in the future.
-
-
-
-
- GHC now detects annotation changes during
- recompilation, and correctly persists new
- annotations.
-
-
-
-
- There is a new set of primops for utilizing
- hardware-based prefetch instructions, to help
- guide the processor's caching decisions.
-
-
- Currently, these are only supported with the LLVM
- backend and x86/amd64 backends.
-
-
-
-
-
-
- GHCi
-
-
- The monomorphism restriction is now turned off
- by default in GHCi.
-
-
-
-
- GHCi now supports a prompt2
- setting, which allows you to customize the
- continuation prompt of multi-line input.
-
- For more information, see .
-
-
-
-
- The new :shows paths command
- shows the current working directory and the
- current search path for Haskell modules.
-
-
-
-
-
- On Linux, the static GHCi linker now supports weak symbols.
-
-
-
-
-
- The (static) GHCi linker now runs constructors for
- linked libraries. This means for example that C
- code using
- __attribute__((constructor))
- can now properly be loaded into GHCi.
-
-
-
- Note: destructors are not supported.
-
-
-
-
-
-
- Template Haskell
-
-
-
- Template Haskell now supports Roles.
-
-
-
-
- Template Haskell now supports annotation pragmas.
-
-
-
-
- Typed Template Haskell expressions are now supported. See
- for more details.
-
-
-
-
- Template Haskell declarations, types, patterns, and
- untyped expressions are no longer
- typechecked at all. This is a backwards-compatible change
- since it allows strictly more programs to be typed.
-
-
-
-
-
-
- Runtime system
-
-
-
- The RTS linker can now unload object code at
- runtime (when using the GHC API
- ObjLink module.) Previously,
- GHC would not unload the old object file, causing
- a gradual memory leak as more objects were loaded
- over time.
-
-
-
- Note that this change in unloading behavior
- only affects statically
- linked binaries, and not dynamic ones.
-
-
-
-
-
- The performance of StablePtrs and
- StableNames has been improved.
-
-
-
-
-
- The default maximum stack size has
- increased. Previously, it defaulted to 8m
- (equivalent to passing +RTS
- -K8m. Now, GHC will use up-to 80% of the
- physical memory available at
- runtime.
-
-
-
-
-
-
- Build system
-
-
-
- GHC >= 7.4 is now required for bootstrapping.
-
-
-
-
- GHC can now be built with Clang, and use Clang as
- the preprocessor for Haskell code. Only Clang
- version 3.4 (or Apple LLVM Clang 5.0) or beyond is
- reliably supported.
-
-
-
- Note that normally, GHC uses
- gcc as the preprocessor for
- Haskell code, which implements
- -traditional behavior. However,
- Clang is not 100% compatible with GCC's
- -traditional as it is rather
- implementation specified, and is more strict.
-
-
-
- As a result of this, when using Clang as the
- preprocessor, some programs which previously used
- -XCPP and the preprocessor will
- now fail to compile. Users who wish to retain the
- previous behavior are better off using cpphs.
-
-
-
-
-
-
-
- Libraries
-
-
- There have been some changes that have effected multiple
- libraries:
-
-
-
-
-
- TODO FIXME
-
-
-
-
-
- array
-
-
-
- Version number 0.5.0.0 (was 0.4.0.1)
-
-
-
-
-
-
- base
-
-
-
- Version number 4.7.0.0 (was 4.6.0.1)
-
-
-
-
- The Control.Category module now has the
- PolyKinds extension enabled, meaning
- that instances of Category no longer
- need be of kind * -> * -> *.
-
-
-
-
- There are now Foldable and Traversable
- instances for Either a, Const r, and (,) a.
-
-
-
-
- There is now a Monoid instance for Const.
-
-
-
-
- There is now a Data instance for Data.Version.
-
-
-
-
- There are now Eq, Ord, Show and Read instances for ZipList.
-
-
-
-
- There are now Eq, Ord, Show and Read instances for Down.
-
-
-
-
- There are now Eq, Ord, Show, Read and Generic instances for types in GHC.Generics (U1, Par1, Rec1, K1, M1, (:+:), (:*:), (:.:)).
-
-
-
-
- A zero-width unboxed poly-kinded Proxy#
- was added to GHC.Prim. It can be used to make it so
- that there is no the operational overhead for passing around proxy
- arguments to model type application.
-
-
-
-
- Control.Concurrent.MVar has a new
- implementation of readMVar, which
- fixes a long-standing bug where
- readMVar is only atomic if there
- are no other threads running
- putMVar.
- readMVar now is atomic, and is
- guaranteed to return the value from the first
- putMVar. There is also a new tryReadMVar
- which is a non-blocking version.
-
-
-
-
- There are now byte endian-swapping primitives
- available in Data.Word, which
- use optimized machine instructions when available.
-
-
-
-
- Data.Bool now exports
- bool :: a -> a -> Bool -> a, analogously
- to maybe and either
- in their respective modules.
-
-
-
-
- Rewrote portions of Text.Printf, and
- made changes to Numeric (added
- Numeric.showFFloatAlt and
- Numeric.showGFloatAlt) and
- GHC.Float (added
- formatRealFloatAlt) to support it.
- The rewritten version is extensible to user types, adds a
- "generic" format specifier "%v",
- extends the printf spec
- to support much of C's printf(3)
- functionality, and fixes the spurious warnings about
- using Text.Printf.printf at
- (IO a) while ignoring the return value.
- These changes were contributed by Bart Massey.
-
-
-
-
- The minimal complete definitions for all
- type-classes with cyclic default implementations
- have been explicitly annotated with the new
- {-# MINIMAL #-} pragma.
-
-
-
-
- Control.Applicative.WrappedMonad,
- which can be used to convert a Monad
- to an Applicative, has now
- a Monad m => Monad (WrappedMonad m)
- instance.
-
-
-
-
-
-
- bin-package-db
-
-
-
- This is an internal package, and should not be used.
-
-
-
-
-
-
- binary
-
-
-
- Version number 0.7.1.0 (was 0.5.1.1)
-
-
-
-
-
-
- bytestring
-
-
-
- Version number 0.10.4.0 (was 0.10.0.0)
-
-
-
-
-
-
- Cabal
-
-
-
- Version number 1.18.1.3 (was 1.16.0)
-
-
-
-
-
-
- containers
-
-
-
- Version number 0.5.4.0 (was 0.5.0.0)
-
-
-
-
-
-
- deepseq
-
-
-
- Version number 1.3.0.2 (was 1.3.0.1)
-
-
-
-
-
-
- directory
-
-
-
- Version number 1.2.0.2 (was 1.2.0.1)
-
-
-
-
- The function findExecutables
- now correctly checks to see if the execute bit is
- set on Linux, rather than just looking in
- $PATH.
-
-
-
-
- There are several new functions for finding files,
- including findFiles and
- findFilesWith, which allow you
- to search for a file given a set of filepaths, and
- run a predicate over them.
-
-
-
-
-
-
- filepath
-
-
-
- Version number 1.3.0.2 (was 1.3.0.1)
-
-
-
-
-
-
- ghc-prim
-
-
-
- Version number 0.3.1.0 (was 0.3.0.0)
-
-
-
-
- The type-classes Eq and
- Ord have been annotated with
- the new {-# MINIMAL #-}
- pragma.
-
-
-
-
- There is a new type exposed by
- GHC.Types, called
- SPEC, which can be used to
- inform GHC to perform call-pattern specialisation
- extremely aggressively. See for more details
- concerning -fspec-constr.
-
-
-
-
-
-
- haskell98
-
-
-
- Version number 2.0.0.3 (was 2.0.0.2)
-
-
-
-
-
-
- haskell2010
-
-
-
- Version number 1.1.1.1 (was 1.1.1.0)
-
-
-
-
-
-
- hoopl
-
-
-
- Version number 3.10.0.0 (was 3.9.0.0)
-
-
-
-
-
-
- hpc
-
-
-
- Version number 0.6.0.1 (was 0.6.0.0)
-
-
-
-
-
-
- integer-gmp
-
-
-
- Version number 0.5.1.0 (was 0.5.0.0)
-
-
-
-
-
-
- old-locale
-
-
-
- Version number 1.0.0.6 (was 1.0.0.5)
-
-
-
-
-
-
- old-time
-
-
-
- Version number 1.1.0.2 (was 1.1.0.1)
-
-
-
-
-
-
- process
-
-
-
- Version number 1.2.0.0 (was 1.1.0.2)
-
-
-
-
- Several bugs have been fixed, including deadlocks
- in readProcess and
- readProcessWithExitCode.
-
-
-
-
-
-
- template-haskell
-
-
-
- Version number 2.9.0.0 (was 2.8.0.0)
-
-
-
-
- Typed Template Haskell expressions are now
- supported. See
- for more details.
-
-
-
-
- There is now support for roles.
-
-
-
-
- There is now support for annotation pragmas.
-
-
-
-
-
-
- time
-
-
-
- Version number 1.4.1 (was 1.4.1)
-
-
-
-
-
-
- unix
-
-
-
- Version number 2.7.0.0 (was 2.6.0.0)
-
-
-
-
- A crash in getGroupEntryForID
- (and related functions like
- getUserEntryForID and
- getUserEntryForName) in
- multi-threaded applications has been fixed.
-
-
-
-
- The functions getGroupEntryForID
- and getUserEntryForID now fail
- with a isDoesNotExist error when
- the specified ID cannot be found.
-
-
-
-
-
-
- Win32
-
-
-
- Version number 2.3.0.0 (was 2.3.0.0)
-
-
-
-
-
-
-
- Known bugs
-
-
-
- On OS X 10.7 and beyond, with default build settings,
- the runtime system currently suffers from a fairly
- large (30%) performance regression in the parallel
- garbage collector when using
- -threaded impacting its thoroughput
- and overall scalability.
-
-
- This is due to the fact that the OS X 10.7+ toolchain
- does not (by default) support register variables, or a
- fast __thread implementation. Note
- that this can be worked around by building GHC using
- GCC instead on OS X platforms, but the binary
- distribution then requires GCC later.
-
-
-
-
-
- On Windows, -dynamic-too is unsupported.
-
-
-
-
-
- On Windows, we currently don't ship dynamic libraries
- or use a dynamic GHCi, unlike Linux, FreeBSD or OS X.
-
-
-
-
-
- On 64bit Windows, the static linker currently suffers
- from some rather large bugs, which we hope to have
- some fixes for soon.
-
-
-
-
-
diff --git a/docs/users_guide/codegens.xml b/docs/users_guide/codegens.xml
index f854e1137e36..d2a805a3ee04 100644
--- a/docs/users_guide/codegens.xml
+++ b/docs/users_guide/codegens.xml
@@ -38,25 +38,27 @@
You must install and have LLVM available on your PATH for the LLVM
code generator to work. Specifically GHC needs to be able to call the
- optand llc tools. Secondly, if you
+ opt and llc tools. Secondly, if you
are running Mac OS X with LLVM 3.0 or greater then
you also need the Clang c
- compiler compiler available on your PATH. Clang and LLVM are
- both included with OS X by default from 10.6 onwards.
+ compiler compiler available on your PATH.
To install LLVM and Clang:
Linux: Use your package management tool.
- Mac OS X: LLVM and Clang are included by
- default from 10.6 and later. For
- 10.5 you should install the
- Homebrew package
- manager for OS X. Alternatively you can download binaries for LLVM
- and Clang from
- here.
+ Mac OS X: Clang is included by
+ default on recent OS X machines when XCode is installed (from
+ 10.6 and later). LLVM is not included. In
+ order to use the LLVM based code generator, you should install
+ the Homebrew
+ package manager for OS X. Alternatively you can download
+ binaries for LLVM and Clang from here.
+
Windows: You should download binaries for
LLVM and clang from
here.
diff --git a/docs/users_guide/external_core.xml b/docs/users_guide/external_core.xml
deleted file mode 100644
index e4354410efa6..000000000000
--- a/docs/users_guide/external_core.xml
+++ /dev/null
@@ -1,1804 +0,0 @@
-
-
-
-
-
-
-
-
- An External Representation for the GHC Core Language (For GHC 6.10)
-
- Andrew Tolmach, Tim Chevalier ({apt,tjc}@cs.pdx.edu) and The GHC Team
-
- This chapter provides a precise definition for the GHC Core
- language, so that it can be used to communicate between GHC and new
- stand-alone compilation tools such as back-ends or
- optimizers.
- This is a draft document, which attempts
- to describe GHC’s current behavior as precisely as possible. Working
- notes scattered throughout indicate areas where further work is
- needed. Constructive comments are very welcome, both on the
- presentation, and on ways in which GHC could be improved in order to
- simplify the Core story.
-
- Support for generating external Core (post-optimization) was
- originally introduced in GHC 5.02. The definition of external Core in
- this document reflects the version of external Core generated by the
- HEAD (unstable) branch of GHC as of May 3, 2008 (version 6.9), using
- the compiler flag -fext-core. We expect that GHC 6.10 will be
- consistent with this definition.
-
- The definition includes a formal grammar and an informal semantics.
- An executable typechecker and interpreter (in Haskell), which
- formally embody the static and dynamic semantics, are available
- separately.
-
-
- Introduction
-
- The Glasgow Haskell Compiler (GHC) uses an intermediate language,
- called Core, as its internal program representation within the
- compiler’s simplification phase. Core resembles a subset of
- Haskell, but with explicit type annotations in the style of the
- polymorphic lambda calculus (Fω).
-
- GHC’s front-end translates full Haskell 98 (plus some extensions)
- into Core. The GHC optimizer then repeatedly transforms Core
- programs while preserving their meaning. A Core Lint pass in GHC
- typechecks Core in between transformation passes (at least when
- the user enables linting by setting a compiler flag), verifying
- that transformations preserve type-correctness. Finally, GHC’s
- back-end translates Core into STG-machine code stg-machine and then into C
- or native code.
-
- Two existing papers discuss the original rationale for the design
- and use of Core ghc-inliner,comp-by-trans-scp, although the (two different) idealized
- versions of Core described therein differ in significant ways from
- the actual Core language in current GHC. In particular, with the
- advent of GHC support for generalized algebraic datatypes (GADTs)
- gadts Core was extended beyond its previous
- Fω-style incarnation to support type
- equality constraints and safe coercions, and is now based on a
- system known as FCsystem-fc.
-
- Researchers interested in writing just part of a Haskell compiler,
- such as a new back-end or a new optimizer pass, might like to use
- GHC to provide the other parts of the compiler. For example, they
- might like to use GHC’s front-end to parse, desugar, and
- type-check source Haskell, then feeding the resulting code to
- their own back-end tool. As another example, they might like to
- use Core as the target language for a front-end compiler of their
- own design, feeding externally synthesized Core into GHC in order
- to take advantage of GHC’s optimizer, code generator, and run-time
- system. Without external Core, there are two ways for compiler
- writers to do this: they can link their code into the GHC
- executable, which is an arduous process, or they can use the GHC
- API ghc-api to do the same task more cleanly. Both ways require new
- code to be written in Haskell.
-
- We present a precisely specified external format for Core files.
- The external format is text-based and human-readable, to promote
- interoperability and ease of use. We hope this format will make it
- easier for external developers to use GHC in a modular way.
-
- It has long been true that GHC prints an ad-hoc textual
- representation of Core if you set certain compiler flags. But this
- representation is intended to be read by people who are debugging
- the compiler, not by other programs. Making Core into a
- machine-readable, bi-directional communication format requires:
-
-
-
- precisely specifying the external format of Core;
-
-
- modifying GHC to generate external Core files
- (post-simplification; as always, users can control the exact
- transformations GHC does with command-line flags);
-
-
- modifying GHC to accept external Core files in place of
- Haskell source files (users will also be able to control what
- GHC does to those files with command-line flags).
-
-
-
-
- The first two facilities will let developers couple GHC’s
- front-end (parser, type-checker, desugarer), and optionally its
- optimizer, with new back-end tools. The last facility will let
- developers write new Core-to-Core transformations as an external
- tool and integrate them into GHC. It will also allow new
- front-ends to generate Core that can be fed into GHC’s optimizer
- or back-end.
-
- However, because there are many (undocumented) idiosyncracies in
- the way GHC produces Core from source Haskell, it will be hard for
- an external tool to produce Core that can be integrated with
- GHC-produced Core (e.g., for the Prelude), and we don’t aim to
- support this. Indeed, for the time being, we aim to support only
- the first two facilities and not the third: we define and
- implement Core as an external format that GHC can use to
- communicate with external back-end tools, and defer the larger
- task of extending GHC to support reading this external format back
- in.
-
- This document addresses the first requirement, a formal Core
- definition, by proposing a formal grammar for an
- external representation of Core,
- and an informal semantics.
-
- GHC supports many type system extensions; the External Core
- printer built into GHC only supports some of them. However,
- External Core should be capable of representing any Haskell 98
- program, and may be able to represent programs that require
- certain type system extensions as well. If a program uses
- unsupported features, GHC may fail to compile it to Core when the
- -fext-core flag is set, or GHC may successfully compile it to
- Core, but the external tools will not be able to typecheck or
- interpret it.
-
- Formal static and dynamic semantics in the form of an executable
- typechecker and interpreter are available separately in the GHC
- source tree
- http://git.haskell.org/ghc.git
- under utils/ext-core.
-
-
-
- External Grammar of Core
-
- In designing the external grammar, we have tried to strike a
- balance among a number of competing goals, including easy
- parseability by machines, easy readability by humans, and adequate
- structural simplicity to allow straightforward presentations of
- the semantics. Thus, we had to make some compromises.
- Specifically:
-
-
- In order to avoid explosion of parentheses, we support
- standard precedences and short-cuts for expressions, types,
- and kinds. Thus we had to introduce multiple non-terminals for
- each of these syntactic categories, and as a result, the
- concrete grammar is longer and more complex than the
- underlying abstract syntax.
-
- On the other hand, we have kept the grammar simpler by
- avoiding special syntax for tuple types and terms. Tuples
- (both boxed and unboxed) are treated as ordinary constructors.
-
- All type abstractions and applications are given in full, even
- though some of them (e.g., for tuples) could be reconstructed;
- this means a parser for Core does not have to reconstruct
- types.
- These choices are certainly debatable. In
- particular, keeping type applications on tuples and case arms
- considerably increases the size of Core files and makes them less
- human-readable, though it allows a Core parser to be simpler.
-
-
- The syntax of identifiers is heavily restricted (to just
- alphanumerics and underscores); this again makes Core easier
- to parse but harder to read.
-
-
- We use the following notational conventions for syntax:
-
-
-
-
-
- [ pat ]
- optional
-
-
-
- { pat }
- zero or more repetitions
-
-
-
-
- { pat }+
-
- one or more repetitions
-
-
-
-
- pat1 ∣ pat2
-
- choice
-
-
-
-
- fibonacci
-
- terminal syntax in typewriter font
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Module
- module
- →
-
- %module mident { tdef ; }{ vdefg ; }
-
-
-
-
-
- Type defn.
- tdef
- →
-
- %data qtycon { tbind } = { [ cdef {; cdef } ] }
-
- algebraic type
-
-
- ∣
-
- %newtype qtycon qtycon { tbind } = ty
-
- newtype
-
-
-
- Constr. defn.
- cdef
- →
-
- qdcon { @ tbind }{ aty }+
-
-
-
-
- Value defn.
- vdefg
- →
- %rec { vdef { ; vdef } }
- recursive
-
-
-
- ∣
- vdef
- non-recursive
-
-
-
- vdef
- →
- qvar :: ty = exp
-
-
-
-
- Atomic expr.
- aexp
- →
- qvar
- variable
-
-
-
- ∣
- qdcon
- data constructor
-
-
-
- ∣
- lit
- literal
-
-
-
- ∣
- ( exp )
- nested expr.
-
-
-
- Expression
- exp
- →
- aexp
- atomic expresion
-
-
-
- ∣
- aexp { arg }+
- application
-
-
-
- ∣
- \ { binder }+ &arw; exp
- abstraction
-
-
-
- ∣
- %let vdefg %in exp
- local definition
-
-
-
- ∣
- %case ( aty ) exp %of vbind { alt { ; alt } }
- case expression
-
-
-
- ∣
- %cast exp aty
- type coercion
-
-
-
- ∣
- %note " { char } " exp
- expression note
-
-
-
- ∣
- %external ccall " { char } " aty
- external reference
-
-
-
- ∣
- %dynexternal ccall aty
- external reference (dynamic)
-
-
-
- ∣
- %label " { char } "
- external label
-
-
-
- Argument
- arg
- →
- @ aty
- type argument
-
-
-
- ∣
- aexp
- value argument
-
-
-
- Case alt.
- alt
- →
- qdcon { @ tbind }{ vbind } &arw; exp
- constructor alternative
-
-
-
- ∣
- lit &arw; exp
- literal alternative
-
-
-
- ∣
- %_ &arw; exp
- default alternative
-
-
-
- Binder
- binder
- →
- @ tbind
- type binder
-
-
-
- ∣
- vbind
- value binder
-
-
-
- Type binder
- tbind
- →
- tyvar
- implicitly of kind *
-
-
-
- ∣
- ( tyvar :: kind )
- explicitly kinded
-
-
-
- Value binder
- vbind
- →
- ( var :: ty )
-
-
-
-
- Literal
- lit
- →
- ( [-] { digit }+:: ty )
- integer
-
-
-
- ∣
- ( [-] { digit }+% { digit }+:: ty )
- rational
-
-
-
- ∣
- ( ' char ' :: ty )
- character
-
-
-
- ∣
- ( " { char } " :: ty )
- string
-
-
-
- Character
- char
- →
- any ASCII character in range 0x20-0x7E except 0x22,0x27,0x5c
-
-
-
- ∣
- \x hex hex
- ASCII code escape sequence
-
-
-
- hex
- →
- 0∣…∣9 ∣a ∣…∣f
-
-
-
-
- Atomic type
- aty
- →
- tyvar
- type variable
-
-
-
- ∣
- qtycon
- type constructor
-
-
-
- ∣
- ( ty )
- nested type
-
-
-
- Basic type
- bty
- →
- aty
- atomic type
-
-
-
- ∣
- bty aty
- type application
-
-
-
- ∣
- %trans aty aty
- transitive coercion
-
-
-
- ∣
- %sym aty
- symmetric coercion
-
-
-
- ∣
- %unsafe aty aty
- unsafe coercion
-
-
-
- ∣
- %left aty
- left coercion
-
-
-
- ∣
- %right aty
- right coercion
-
-
-
- ∣
- %inst aty aty
- instantiation coercion
-
-
-
- Type
- ty
- →
- bty
- basic type
-
-
-
- ∣
- %forall { tbind }+. ty
- type abstraction
-
-
-
- ∣
- bty &arw; ty
- arrow type construction
-
-
-
- Atomic kind
- akind
- →
- *
- lifted kind
-
-
-
- ∣
- #
- unlifted kind
-
-
-
- ∣
- ?
- open kind
-
-
-
- ∣
- bty :=: bty
- equality kind
-
-
-
- ∣
- ( kind )
- nested kind
-
-
-
- Kind
- kind
- →
- akind
- atomic kind
-
-
-
- ∣
- akind &arw; kind
- arrow kind
-
-
-
- Identifier
- mident
- →
- pname : uname
- module
-
-
-
- tycon
- →
- uname
- type constr.
-
-
-
- qtycon
- →
- mident . tycon
- qualified type constr.
-
-
-
- tyvar
- →
- lname
- type variable
-
-
-
- dcon
- →
- uname
- data constr.
-
-
-
- qdcon
- →
- mident . dcon
- qualified data constr.
-
-
-
- var
- →
- lname
- variable
-
-
-
- qvar
- →
- [ mident . ] var
- optionally qualified variable
-
-
-
- Name
- lname
- →
- lower { namechar }
-
-
-
-
- uname
- →
- upper { namechar }
-
-
-
-
- pname
- →
- { namechar }+
-
-
-
-
- namechar
- →
- lower ∣ upper ∣ digit
-
-
-
-
- lower
- →
- a ∣ b ∣ … ∣ z ∣ _
-
-
-
-
- upper
- →
- A ∣ B ∣ … ∣ Z
-
-
-
-
- digit
- →
- 0 ∣ 1 ∣ … ∣ 9
-
-
-
-
-
-
-
-
- Informal Semantics
-
- At the term level, Core resembles a explicitly-typed polymorphic
- lambda calculus (Fω), with the addition of
- local let bindings, algebraic type definitions, constructors, and
- case expressions, and primitive types, literals and operators. Its
- type system is richer than that of System F, supporting explicit
- type equality coercions and type functions.system-fc
-
- In this section we concentrate on the less obvious points about
- Core.
-
-
- Program Organization and Modules
-
- Core programs are organized into modules, corresponding directly
- to source-level Haskell modules. Each module has a identifying
- name mident. A module identifier consists of a package name
- followed by a module name, which may be hierarchical: for
- example, base:GHC.Base is the module identifier for GHC’s Base
- module. Its name is Base, and it lives in the GHC hierarchy
- within the base package. Section 5.8 of the GHC users’ guide
- explains package names ghc-user-guide. In particular, note that a Core
- program may contain multiple modules with the same (possibly
- hierarchical) module name that differ in their package names. In
- some of the code examples that follow, we will omit package
- names and possibly full hierarchical module names from
- identifiers for brevity, but be aware that they are always
- required.
- A possible improvement to the Core syntax
- would be to add explicit import lists to Core modules, which could be
- used to specify abbrevations for long qualified names. This would make
- the code more human-readable.
-
-
- Each module may contain the following kinds of top-level
- declarations:
-
-
-
- Algebraic data type declarations, each defining a type
- constructor and one or more data constructors;
-
-
- Newtype declarations, corresponding to Haskell newtype
- declarations, each defining a type constructor and a
- coercion name; and
-
-
- Value declarations, defining the types and values of
- top-level variables.
-
-
-
-
- No type constructor, data constructor, or top-level value may be
- declared more than once within a given module. All the type
- declarations are (potentially) mutually recursive. Value
- declarations must be in dependency order, with explicit grouping
- of potentially mutually recursive declarations.
-
- Identifiers defined in top-level declarations may be external or
- internal. External identifiers can be referenced from any other
- module in the program, using conventional dot notation (e.g.,
- base:GHC.Base.Bool, base:GHC.Base.True). Internal identifiers
- are visible only within the defining module. All type and data
- constructors are external, and are always defined and referenced
- using fully qualified names (with dots).
-
- A top-level value is external if it is defined and referenced
- using a fully qualified name with a dot (e.g., main:MyModule.foo = ...);
- otherwise, it is internal (e.g., bar = ...). Note that
- Core’s notion of an external identifier does not necessarily
- coincide with that of exported identifier in a Haskell source
- module. An identifier can be an external identifier in Core, but
- not be exported by the original Haskell source
- module.
- Two examples of such identifiers are: data
- constructors, and values that potentially appear in an unfolding. For an
- example of the latter, consider Main.foo = ... Main.bar ..., where
- Main.foo is inlineable. Since bar appears in foo’s unfolding, it is
- defined and referenced with an external name, even if bar was not
- exported by the original source module.
-
- However, if an identifier was exported by the Haskell source
- module, it will appear as an external name in Core.
-
- Core modules have no explicit import or export lists. Modules
- may be mutually recursive. Note that because of the latter fact,
- GHC currently prints out the top-level bindings for every module
- as a single recursive group, in order to avoid keeping track of
- dependencies between top-level values within a module. An
- external Core tool could reconstruct dependencies later, of
- course.
-
- There is also an implicitly-defined module ghc-prim:GHC.Prim,
- which exports the built-in types and values that must be
- provided by any implementation of Core (including GHC). Details
- of this module are in the Primitive Module section.
-
- A Core program is a collection of distinctly-named modules that
- includes a module called main:Main having an exported value
- called main:ZCMain.main of type base:GHC.IOBase.IO a (for some
- type a). (Note that the strangely named wrapper for main is the
- one exception to the rule that qualified names defined within a
- module m must have module name m.)
-
- Many Core programs will contain library modules, such as
- base:GHC.Base, which implement parts of the Haskell standard
- library. In principle, these modules are ordinary Haskell
- modules, with no special status. In practice, the requirement on
- the type of main:Main.main implies that every program will
- contain a large subset of the standard library modules.
-
-
-
- Namespaces
-
- There are five distinct namespaces:
-
- module identifiers (mident),
- type constructors (tycon),
- type variables (tyvar),
- data constructors (dcon),
- term variables (var).
-
-
-
- Spaces (1), (2+3), and (4+5) can be distinguished from each
- other by context. To distinguish (2) from (3) and (4) from (5),
- we require that data and type constructors begin with an
- upper-case character, and that term and type variables begin
- with a lower-case character.
-
- Primitive types and operators are not syntactically
- distinguished.
-
- Primitive coercion operators, of which there are six, are
- syntactically distinguished in the grammar. This is because
- these coercions must be fully applied, and because
- distinguishing their applications in the syntax makes
- typechecking easier.
-
- A given variable (type or term) may have multiple definitions
- within a module. However, definitions of term variables never
- shadow one another: the scope of the definition of a given
- variable never contains a redefinition of the same variable.
- Type variables may be shadowed. Thus, if a term variable has
- multiple definitions within a module, all those definitions must
- be local (let-bound). The only exception to this rule is that
- (necessarily closed) types labelling %external expressions may
- contain tyvar bindings that shadow outer bindings.
-
- Core generated by GHC makes heavy use of encoded names, in which
- the characters Z and z are used to introduce escape sequences
- for non-alphabetic characters such as dollar sign $ (zd), hash #
- (zh), plus + (zp), etc. This is the same encoding used in .hi
- files and in the back-end of GHC itself, except that we
- sometimes change an initial z to Z, or vice-versa, in order to
- maintain case distinctions.
-
- Finally, note that hierarchical module names are z-encoded in
- Core: for example, base:GHC.Base.foo is rendered as
- base:GHCziBase.foo. A parser may reconstruct the module
- hierarchy, or regard GHCziBase as a flat name.
-
-
-
- Types and Kinds
-
- In Core, all type abstractions and applications are explicit.
- This make it easy to typecheck any (closed) fragment of Core
- code. An full executable typechecker is available separately.
-
-
- Types
-
- Types are described by type expressions, which are built from
- named type constructors and type variables using type
- application and universal quantification. Each type
- constructor has a fixed arity ≥ 0. Because it is so widely
- used, there is special infix syntax for the fully-applied
- function type constructor (&arw;). (The prefix identifier for
- this constructor is ghc-prim:GHC.Prim.ZLzmzgZR; this should
- only appear in unapplied or partially applied form.)
-
- There are also a number of other primitive type constructors
- (e.g., Intzh) that are predefined in the GHC.Prim module, but
- have no special syntax. %data and %newtype declarations
- introduce additional type constructors, as described below.
- Type constructors are distinguished solely by name.
-
-
-
- Coercions
-
- A type may also be built using one of the primitive coercion
- operators, as described in the Namespaces section. For details on the
- meanings of these operators, see the System FC paper system-fc. Also
- see the Newtypes section for
- examples of how GHC uses coercions in Core code.
-
-
-
- Kinds
- As described in the Haskell definition, it is necessary to
- distinguish well-formed type-expressions by classifying them
- into different kindshaskell98, p. 41. In particular, Core
- explicitly records the kind of every bound type variable.
-
- In addition, Core’s kind system includes equality kinds, as in
- System FC system-fc. An application of a built-in coercion, or of a
- user-defined coercion as introduced by a newtype declaration,
- has an equality kind.
-
-
-
- Lifted and Unlifted Types
-
- Semantically, a type is lifted if and only if it has bottom as
- an element. We need to distinguish them because operationally,
- terms with lifted types may be represented by closures; terms
- with unlifted types must not be represented by closures, which
- implies that any unboxed value is necessarily unlifted. We
- distinguish between lifted and unlifted types by ascribing
- them different kinds.
-
- Currently, all the primitive types are unlifted (including a
- few boxed primitive types such as ByteArrayzh). Peyton-Jones
- and Launchbury pj:unboxed described the ideas behind unboxed and
- unlifted types.
-
-
-
- Type Constructors; Base Kinds and Higher Kinds
-
- Every type constructor has a kind, depending on its arity and
- whether it or its arguments are lifted.
-
- Term variables can only be assigned types that have base
- kinds: the base kinds are *, #, and ?. The three base kinds
- distinguish the liftedness of the types they classify: *
- represents lifted types; # represents unlifted types; and ? is
- the open kind, representing a type that may be either lifted
- or unlifted. Of these, only * ever appears in Core type
- declarations generated from user code; the other two are
- needed to describe certain types in primitive (or otherwise
- specially-generated) code (which, after optimization, could
- potentially appear anywhere).
-
- In particular, no top-level identifier (except in
- ghc-prim:GHC.Prim) has a type of kind # or ?.
-
- Nullary type constructors have base kinds: for example, the
- type Int has kind *, and Int# has kind #.
-
- Non-nullary type constructors have higher kinds: kinds that
- have the form
- k1&arw;k2, where
- k1 and k2 are
- kinds. For example, the function type constructor &arw; has
- kind * &arw; (* &arw; *). Since Haskell allows abstracting
- over type constructors, type variables may have higher kinds;
- however, much more commonly they have kind *, so that is the
- default if a type binder omits a kind.
-
-
-
-
- Type Synonyms and Type Equivalence
-
- There is no mechanism for defining type synonyms
- (corresponding to Haskell type declarations).
-
- Type equivalence is just syntactic equivalence on type
- expressions (of base kinds) modulo:
-
-
- alpha-renaming of variables bound in %forall types;
- the identity a &arw; b ≡ ghc-prim:GHC.Prim.ZLzmzgZR a b
-
-
-
-
-
- Algebraic data types
-
- Each data declaration introduces a new type constructor and a
- set of one or more data constructors, normally corresponding
- directly to a source Haskell data declaration. For example, the
- source declaration
-
-
-data Bintree a =
- Fork (Bintree a) (Bintree a)
- | Leaf a
-
-
- might induce the following Core declaration
-
-
-%data Bintree a = {
- Fork (Bintree a) (Bintree a);
- Leaf a)}
-
-
- which introduces the unary type constructor Bintree of kind
- *&arw;* and two data constructors with types
-
-
-Fork :: %forall a . Bintree a &arw; Bintree a &arw; Bintree a
-Leaf :: %forall a . a &arw; Bintree a
-
-
- We define the arity of each data constructor to be the number of
- value arguments it takes; e.g. Fork has arity 2 and Leaf has
- arity 1.
-
- For a less conventional example illustrating the possibility of
- higher-order kinds, the Haskell source declaration
-
-
-data A f a = MkA (f a)
-
-
- might induce the Core declaration
-
-
-%data A (f::*&arw;*) a = { MkA (f a) }
-
-
- which introduces the constructor
-
-
-MkA :: %forall (f::*&arw;*) a . (f a) &arw; (A f) a
-
-
- GHC (like some other Haskell implementations) supports an
- extension to Haskell98 for existential types such as
-
-
-data T = forall a . MkT a (a &arw; Bool)
-
-
- This is represented by the Core declaration
-
-
-%data T = {MkT @a a (a &arw; Bool)}
-
-
- which introduces the nullary type constructor T and the data
- constructor
-
-
-MkT :: %forall a . a &arw; (a &arw; Bool) &arw; T
-
-
- In general, existentially quantified variables appear as extra
- universally quantified variables in the data contructor types. An
- example of how to construct and deconstruct values of type T is
- shown in the Expression Forms section.
-
-
-
- Newtypes
-
- Each Core %newtype declaration introduces a new type constructor
- and an associated representation type, corresponding to a source
- Haskell newtype declaration. However, unlike in source Haskell,
- a %newtype declaration does not introduce any data constructors.
-
- Each %newtype declaration also introduces a new coercion
- (syntactically, just another type constructor) that implies an
- axiom equating the type constructor, applied to any type
- variables bound by the %newtype, to the representation type.
-
- For example, the Haskell fragment
-
-
-newtype U = MkU Bool
-u = MkU True
-v = case u of
- MkU b &arw; not b
-
-
- might induce the Core fragment
-
-
-%newtype U ZCCoU = Bool;
-u :: U = %cast (True)
- ((%sym ZCCoU));
-v :: Bool = not (%cast (u) ZCCoU);
-
-
- The newtype declaration implies that the types U and Bool have
- equivalent representations, and the coercion axiom ZCCoU
- provides evidence that U is equivalent to Bool. Notice that in
- the body of u, the boolean value True is cast to type U using
- the primitive symmetry rule applied to ZCCoU: that is, using a
- coercion of kind Bool :=: U. And in the body of v, u is cast
- back to type Bool using the axiom ZCCoU.
-
- Notice that the case in the Haskell source code above translates
- to a cast in the corresponding Core code. That is because
- operationally, a case on a value whose type is declared by a
- newtype declaration is a no-op. Unlike a case on any other
- value, such a case does no evaluation: its only function is to
- coerce its scrutinee’s type.
-
- Also notice that unlike in a previous draft version of External
- Core, there is no need to handle recursive newtypes specially.
-
-
-
-
- Expression Forms
-
- Variables and data constructors are straightforward.
-
- Literal (lit) expressions consist of a literal value, in one of
- four different formats, and a (primitive) type annotation. Only
- certain combinations of format and type are permitted;
- see the Primitive Module section.
- The character and string formats can describe only 8-bit ASCII characters.
-
- Moreover, because the operational semantics for Core interprets
- strings as C-style null-terminated strings, strings should not
- contain embedded nulls.
-
- In Core, value applications, type applications, value
- abstractions, and type abstractions are all explicit. To tell
- them apart, type arguments in applications and formal type
- arguments in abstractions are preceded by an @ symbol. (In
- abstractions, the @ plays essentially the same role as the more
- usual Λ symbol.) For example, the Haskell source declaration
-
-
-f x = Leaf (Leaf x)
-
-
- might induce the Core declaration
-
-
-f :: %forall a . a &arw; BinTree (BinTree a) =
- \ @a (x::a) &arw; Leaf @(Bintree a) (Leaf @a x)
-
-
- Value applications may be of user-defined functions, data
- constructors, or primitives. None of these sorts of applications
- are necessarily saturated.
-
- Note that the arguments of type applications are not always of
- kind *. For example, given our previous definition of type A:
-
-
-data A f a = MkA (f a)
-
-
- the source code
-
-
-MkA (Leaf True)
-
-
- becomes
-
-
-(MkA @Bintree @Bool) (Leaf @Bool True)
-
-
- Local bindings, of a single variable or of a set of mutually
- recursive variables, are represented by %let expressions in the
- usual way.
-
- By far the most complicated expression form is %case. %case
- expressions are permitted over values of any type, although they
- will normally be algebraic or primitive types (with literal
- values). Evaluating a %case forces the evaluation of the
- expression being tested (the scrutinee). The value of the
- scrutinee is bound to the variable following the %of keyword,
- which is in scope in all alternatives; this is useful when the
- scrutinee is a non-atomic expression (see next example). The
- scrutinee is preceded by the type of the entire %case
- expression: that is, the result type that all of the %case
- alternatives have (this is intended to make type reconstruction
- easier in the presence of type equality coercions).
-
- In an algebraic %case, all the case alternatives must be labeled
- with distinct data constructors from the algebraic type,
- followed by any existential type variable bindings (see below),
- and typed term variable bindings corresponding to the data
- constructor’s arguments. The number of variables must match the
- data constructor’s arity.
-
- For example, the following Haskell source expression
-
-
-case g x of
- Fork l r &arw; Fork r l
- t@(Leaf v) &arw; Fork t t
-
-
- might induce the Core expression
-
-
-%case ((Bintree a)) g x %of (t::Bintree a)
- Fork (l::Bintree a) (r::Bintree a) &arw;
- Fork @a r l
- Leaf (v::a) &arw;
- Fork @a t t
-
-
- When performing a %case over a value of an
- existentially-quantified algebraic type, the alternative must
- include extra local type bindings for the
- existentially-quantified variables. For example, given
-
-
-data T = forall a . MkT a (a &arw; Bool)
-
-
- the source
-
-
-case x of
- MkT w g &arw; g w
-
-
- becomes
-
-
-%case x %of (x’::T)
- MkT @b (w::b) (g::b&arw;Bool) &arw; g w
-
-
- In a %case over literal alternatives, all the case alternatives
- must be distinct literals of the same primitive type.
-
- The list of alternatives may begin with a default alternative
- labeled with an underscore (%_), whose right-hand side will be
- evaluated if none of the other alternatives match. The default
- is optional except for in a case over a primitive type, or when
- there are no other alternatives. If the case is over neither an
- algebraic type nor a primitive type, then the list of
- alternatives must contain a default alternative and nothing
- else. For algebraic cases, the set of alternatives need not be
- exhaustive, even if no default is given; if alternatives are
- missing, this implies that GHC has deduced that they cannot
- occur.
-
- %cast is used to manipulate newtypes, as described in
- the Newtype section. The %cast expression
- takes an expression and a coercion: syntactically, the coercion
- is an arbitrary type, but it must have an equality kind. In an
- expression (cast e co), if e :: T and co has kind T :=: U, then
- the overall expression has type Ughc-fc-commentary. Here, co must be a
- coercion whose left-hand side is T.
-
- Note that unlike the %coerce expression that existed in previous
- versions of Core, this means that %cast is (almost) type-safe:
- the coercion argument provides evidence that can be verified by
- a typechecker. There are still unsafe %casts, corresponding to
- the unsafe %coerce construct that existed in old versions of
- Core, because there is a primitive unsafe coercion type that can
- be used to cast arbitrary types to each other. GHC uses this for
- such purposes as coercing the return type of a function (such as
- error) which is guaranteed to never return:
-
-
-case (error "") of
- True &arw; 1
- False &arw; 2
-
-
- becomes:
-
-
-%cast (error @ Bool (ZMZN @ Char))
-(%unsafe Bool Integer);
-
-
- %cast has no operational meaning and is only used in
- typechecking.
-
- A %note expression carries arbitrary internal information that
- GHC finds interesting. The information is encoded as a string.
- Expression notes currently generated by GHC include the inlining
- pragma (InlineMe) and cost-center labels for profiling.
-
- A %external expression denotes an external identifier, which has
- the indicated type (always expressed in terms of Haskell
- primitive types). External Core supports two kinds of external
- calls: %external and %dynexternal. Only the former is supported
- by the current set of stand-alone Core tools. In addition, there
- is a %label construct which GHC may generate but which the Core
- tools do not support.
-
- The present syntax for externals is sufficient for describing C
- functions and labels. Interfacing to other languages may require
- additional information or a different interpretation of the name
- string.
-
-
-
-
- Expression Evaluation
- The dynamic semantics of Core are defined on the type-erasure of
- the program: for example, we ignore all type abstractions and
- applications. The denotational semantics of the resulting
- type-free program are just the conventional ones for a
- call-by-name language, in which expressions are only evaluated
- on demand. But Core is intended to be a call-by-need language,
- in which expressions are only evaluated once. To express the
- sharing behavior of call-by-need, we give an operational model
- in the style of Launchbury launchbury93natural.
-
- This section describes the model informally; a more formal
- semantics is separately available as an executable interpreter.
-
- To simplify the semantics, we consider only well-behaved Core
- programs in which constructor and primitive applications are
- fully saturated, and in which non-trivial expresssions of
- unlifted kind (#) appear only as scrutinees in %case
- expressions. Any program can easily be put into this form; a
- separately available preprocessor illustrates how. In the
- remainder of this section, we use Core to mean well-behaved
- Core.
-
- Evaluating a Core expression means reducing it to weak-head normal form (WHNF),
- i.e., a primitive value, lambda abstraction,
- or fully-applied data constructor. Evaluating a program means
- evaluating the expression main:ZCMain.main.
-
- To make sure that expression evaluation is shared, we make use
- of a heap, which contains heap entries. A heap entry can be:
-
-
-
- A thunk, representing an unevaluated expression, also known
- as a suspension.
-
-
- A WHNF, representing an evaluated expression. The result of
- evaluating a thunk is a WHNF. A WHNF is always a closure
- (corresponding to a lambda abstraction in the source
- program) or a data constructor application: computations
- over primitive types are never suspended.
-
-
-
- Heap pointers point to heap entries: at different times, the
- same heap pointer can point to either a thunk or a WHNF, because
- the run-time system overwrites thunks with WHNFs as computation
- proceeds.
-
- The suspended computation that a thunk represents might
- represent evaluating one of three different kinds of expression.
- The run-time system allocates a different kind of thunk
- depending on what kind of expression it is:
-
-
-
- A thunk for a value definition has a group of suspended
- defining expressions, along with a list of bindings between
- defined names and heap pointers to those suspensions. (A
- value definition may be a recursive group of definitions or
- a single non-recursive definition, and it may be top-level
- (global) or let-bound (local)).
-
-
- A thunk for a function application (where the function is
- user-defined) has a suspended actual argument expression,
- and a binding between the formal argument and a heap pointer
- to that suspension.
-
-
- A thunk for a constructor application has a suspended actual
- argument expression; the entire constructed value has a heap
- pointer to that suspension embedded in it.
-
-
-
- As computation proceeds, copies of the heap pointer for a given
- thunk propagate through the executing program. When another
- computation demands the result of that thunk, the thunk is
- forced: the run-time system computes the thunk’s result,
- yielding a WHNF, and overwrites the heap entry for the thunk
- with the WHNF. Now, all copies of the heap pointer point to the
- new heap entry: a WHNF. Forcing occurs only in the context of
-
-
- evaluating the operator expression of an application;
- evaluating the scrutinee of a case expression; or
- evaluating an argument to a primitive or external function application
-
-
-
- When no pointers to a heap entry (whether it is a thunk or WHNF)
- remain, the garbage collector can reclaim the space it uses. We
- assume this happens implicitly.
-
- With the exception of functions, arrays, and mutable variables,
- we intend that values of all primitive types should be held
- unboxed: they should not be heap-allocated. This does not
- violate call-by-need semantics: all primitive types are
- unlifted, which means that values of those types must be
- evaluated strictly. Unboxed tuple types are not heap-allocated
- either.
-
- Certain primitives and %external functions cause side-effects to
- state threads or to the real world. Where the ordering of these
- side-effects matters, Core already forces this order with data
- dependencies on the pseudo-values representing the threads.
-
- An implementation must specially support the raisezh and
- handlezh primitives: for example, by using a handler stack.
- Again, real-world threading guarantees that they will execute in
- the correct order.
-
-
-
-
- Primitive Module
-
- The semantics of External Core rely on the contents and informal
- semantics of the primitive module ghc-prim:GHC.Prim. Nearly all
- the primitives are required in order to cover GHC’s implementation
- of the Haskell98 standard prelude; the only operators that can be
- completely omitted are those supporting the byte-code interpreter,
- parallelism, and foreign objects. Some of the concurrency
- primitives are needed, but can be given degenerate implementations
- if it desired to target a purely sequential backend (see Section
- the Non-concurrent Back End section).
-
- In addition to these primitives, a large number of C library
- functions are required to implement the full standard Prelude,
- particularly to handle I/O and arithmetic on less usual types.
-
- For a full listing of the names and types of the primitive
- operators, see the GHC library documentation ghcprim.
-
-
- Non-concurrent Back End
-
- The Haskell98 standard prelude doesn’t include any concurrency
- support, but GHC’s implementation of it relies on the existence
- of some concurrency primitives. However, it never actually forks
- multiple threads. Hence, the concurrency primitives can be given
- degenerate implementations that will work in a non-concurrent
- setting, as follows:
-
-
-
- ThreadIdzh can be represented by a singleton type, whose
- (unique) value is returned by myThreadIdzh.
-
-
- forkzh can just die with an unimplemented message.
-
-
- killThreadzh and yieldzh can also just die unimplemented
- since in a one-thread world, the only thread a thread can
- kill is itself, and if a thread yields the program hangs.
-
-
- MVarzh a can be represented by MutVarzh (Maybe a); where a
- concurrent implementation would block, the sequential
- implementation can just die with a suitable message (since
- no other thread exists to unblock it).
-
-
- waitReadzh and waitWritezh can be implemented using a select
- with no timeout.
-
-
-
-
-
- Literals
-
- Only the following combination of literal forms and types are
- permitted:
-
-
-
-
-
-
-
-
- Literal form
- Type
- Description
-
-
-
-
- integer
- Intzh
- Int
-
-
- Wordzh
- Word
-
-
- Addrzh
- Address
-
-
- Charzh
- Unicode character code
-
-
-
- rational
- Floatzh
- Float
-
-
- Doublezh
- Double
-
-
-
- character
- Charzh
- Unicode character specified by ASCII character
-
-
-
- string
- Addrzh
- Address of specified C-format string
-
-
-
-
-
-
-
-
-
-
-
- References
-
-
- ghc-user-guide
-
- The GHC Team
-
- The Glorious Glasgow Haskell Compilation System User's Guide, Version 6.8.2
- 2008
- http://www.haskell.org/ghc/docs/latest/html/users_guide/index.html
-
-
-
- ghc-fc-commentary
-
- GHC Wiki
-
- System FC: equality constraints and coercions
- 2006
- http://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/FC
-
-
-
- ghc-api
-
- Haskell Wiki
-
- Using GHC as a library
- 2007
- http://haskell.org/haskellwiki/GHC/As_a_library
-
-
-
- haskell98
-
- SimonPeyton-Jones
-
- Haskell 98 Language and Libraries: The Revised Report
-
- Cambridge University Press
-
- Cambridge>
- UK
-
-
- 2003
-
-
-
- system-fc
-
- MartinSulzmann
- Manuel M.T.Chakravarty
- SimonPeyton-Jones
- KevinDonnelly
-
- System F with type equality coercions
-
- ACM
-
- New York
- NY
- USA
-
-
- 53-66
- 2007
- http://portal.acm.org/citation.cfm?id=1190324
-
-
-
-
- gadts
-
- SimonPeyton-Jones
- DimitriosVytiniotis
- StephanieWeirich
- GeoffreyWashburn
-
- Simple unification-based type inference for GADTs
-
- ACM
-
- New York
- NY
- USA
-
-
- 50-61
- 2006
- http://research.microsoft.com/Users/simonpj/papers/gadt/index.htm
-
-
-
- Launchbury94
-
- JohnLaunchbury
- Simon L.Peyton-Jones
-
- Lazy Functional State Threads
- 24-35
- 1994
- http://citeseer.ist.psu.edu/article/launchbury93lazy.html
-
-
-
-
- pj:unboxed
-
- Simon L.Peyton-Jones
- JohnLaunchbury
- J.Hughes
-
- Unboxed Values as First Class Citizens in a Non-strict Functional Language
-
- Springer-Verlag LNCS523
-
- Cambridge
- Massachussetts
- USA
-
-
- 636-666
- 1991, August 26-28
- http://citeseer.ist.psu.edu/jones91unboxed.html
-
-
-
-
- ghc-inliner
-
- SimonPeyton-Jones
- SimonMarlow
-
- Secrets of the Glasgow Haskell Compiler inliner
- 1999
-
- Paris
- France
-
- http://research.microsoft.com/Users/simonpj/Papers/inlining/inline.pdf
-
-
-
-
- comp-by-trans-scp
-
- Simon L.Peyton-Jones
- A. L. M.Santos
-
- A transformation-based optimiser for Haskell
- Science of Computer Programming
- 32
- 1-3
- 3-47
- 1998
- http://citeseer.ist.psu.edu/peytonjones98transformationbased.html
-
-
-
- stg-machine
-
- Simon L.Peyton-Jones
-
- Implementing Lazy Functional Languages on Stock Hardware: The Spineless Tagless G-Machine
- Journal of Functional Programming
- 2
- 2
- 127-202
- 1992
- http://citeseer.ist.psu.edu/peytonjones92implementing.html
-
-
-
- launchbury93natural
-
- JohnLaunchbury
-
- A Natural Semantics for Lazy Evaluation
- 144-154
-
- Charleston
- South Carolina
-
- 1993
- http://citeseer.ist.psu.edu/launchbury93natural.html
-
-
-
-
- ghcprim
-
- The GHC Team
-
- Library documentation: GHC.Prim
- 2008
- http://www.haskell.org/ghc/docs/latest/html/libraries/base/GHC-Prim.html
-
-
-
-
diff --git a/docs/users_guide/ffi-chap.xml b/docs/users_guide/ffi-chap.xml
index 4d91947c7204..e7d5a0c37d83 100644
--- a/docs/users_guide/ffi-chap.xml
+++ b/docs/users_guide/ffi-chap.xml
@@ -213,6 +213,40 @@ newtype {-# CTYPE "useconds_t" #-} T = ...
+
+
+ hs_thread_done()
+
+
+void hs_thread_done(void);
+
+
+
+ GHC allocates a small amount of thread-local memory when a
+ thread calls a Haskell function via a foreign
+ export. This memory is not normally freed until
+ hs_exit(); the memory is cached so that
+ subsequent calls into Haskell are fast. However, if your
+ application is long-running and repeatedly creates new
+ threads that call into Haskell, you probably want to arrange
+ that this memory is freed in those threads that have
+ finished calling Haskell functions. To do this, call
+ hs_thread_done() from the thread whose
+ memory you want to free.
+
+
+
+ Calling hs_thread_done() is entirely
+ optional. You can call it as often or as little as you
+ like. It is safe to call it from a thread that has never
+ called any Haskell functions, or one that never will. If
+ you forget to call it, the worst that can happen is that
+ some memory remains allocated until
+ hs_exit() is called. If you call it too
+ often, the worst that can happen is that the next call to a
+ Haskell function incurs some extra overhead.
+
+
diff --git a/docs/users_guide/flags.xml b/docs/users_guide/flags.xml
index 3a69c8f55082..8381ca12546b 100644
--- a/docs/users_guide/flags.xml
+++ b/docs/users_guide/flags.xml
@@ -590,7 +590,7 @@
- P
+ PCompile to be part of package Pstatic-
@@ -704,9 +704,15 @@
+
+ n
+ set the limit for context reduction. Default is 20.
+ dynamic
+
+
- Enable most language extensions; see for exactly which ones.
+ Deprecated. Enable most language extensions; see for exactly which ones.dynamic
@@ -717,10 +723,10 @@
- n
- set the limit for context reduction. Default is 20.
+
+ Enable Safe Haskell trusted package requirement for trustworthy modules.dynamic
-
+ n
@@ -745,29 +751,80 @@
- Automatically derive Typeable instances for every datatype and type class declaration.
+ Automatically derive Typeable instances for every datatype and type class declaration.
Implies .dynamic
+
+
+ Enable bang patterns.
+ dynamic
+
+
+
+
+ Enable support for binary literals.
+ dynamic
+
+
+
+
+ Enable the CAPI calling convention.
+ dynamic
+
+
+
+
+ Enable constrained class methods.
+ dynamic
+
+ Enable a kind of constraints.dynamic
+
+
+ Enable the C preprocessor.
+ dynamic
+
+ Enable datatype promotion.dynamic
+
+
+ Enable default signatures.
+ dynamic
+
+
- Enable deriving for the Data and Typeable classes.
+ Enable deriving for the Data and Typeable classes.
+ Implied by .dynamic
+
+
+ Enable deriving for the Functor class.
+ Implied by .
+ dynamic
+
+
+
+
+ Enable deriving for the Foldable class.
+ Implied by .
+ dynamic
+
+ Enable deriving for the Generic class.
@@ -775,42 +832,93 @@
-
- Enable newtype deriving.
+
+ Enable deriving for the Traversable class.
+ Implies and .dynamic
-
+
- Enable record
- field disambiguation
+ Enable record field disambiguation.
+ Implied by .dynamic
- Allow empty case alternatives
-
+ Allow empty case alternatives.dynamic
+
+
+ Enable empty data declarations.
+ dynamic
+
+
+
+
+ Enable existential quantification.
+ dynamic
+
+
+
+
+ Enable explicit universal quantification.
+ Implied by ,
+ ,
+ and
+ .
+
+ dynamic
+
+
+
+
+ Enable using the keyword type to specify the namespace of
+ entries in imports and exports ().
+ Implied by and .
+ dynamic
+
+
- Use GHCi's extended default rules in a normal module
+ Use GHCi's extended default rules in a normal module.dynamic
+
+
+ Enable flexible contexts.
+ Implied by .
+ dynamic
+
+
+
+
+ Enable flexible instances.
+ Implies . Implied by .
+ dynamic
+
+
- Enable foreign function interface (implied by
- )
+ Enable foreign function interface.dynamic
+
+
+ Enable functional dependencies.
+ Implies .
+ dynamic
+
+ Enable generalised algebraic data types.
-
+ Implies and .
dynamic
@@ -821,6 +929,12 @@
dynamic
+
+
+ Enable newtype deriving.
+ dynamic
+
+ Deprecated, does nothing. No longer enables generic classes.
@@ -832,103 +946,73 @@
Enable Implicit Parameters.
- Implied by .
+ Implies and .dynamic
- Don't implicitly import Prelude
+ Don't implicitly import Prelude.
+ Implied by .dynamic
-
- Enable incoherent instances.
- Implies
- dynamic
-
-
-
-
- Disable the monomorphism restriction
- dynamic
-
-
-
-
- Enable support for negative literals
- dynamic
-
-
-
-
- Disable support for n+k patterns
- dynamic
-
-
-
-
- Enable support for 'fractional' integer literals
- dynamic
-
-
-
-
- Enable overlapping instances
+
+ Enable impredicative types.
+ Implies .dynamic
-
+
-
- Enable overloaded lists.
-
+
+ Enable incoherent instances.
+ Implies .dynamic
-
+
-
- Enable overloaded string literals.
-
+
+ Enable instance signatures.dynamic
-
+
-
- Enable quasiquotation.
+
+ Enable interruptible FFI.dynamic
-
+
-
- Relaxed checking for mutually-recursive polymorphic functions
+
+ Enable kind signatures.
+ Implied by and .dynamic
-
+
-
- Disable support for traditional record syntax (as supported by Haskell 98) C {f = x}
+
+ Enable lambda-case expressions.dynamic
-
+
-
- Enable type families.
+
+ Enable liberalised type synonyms.dynamic
-
+
-
- Enable undecidable instances
+
+ Allow "#" as a postfix modifier on identifiers.dynamic
-
+
-
- Enable kind polymorphism.
- Implies .
+
+ Enable monad comprehensions.dynamic
-
+
@@ -937,165 +1021,159 @@
dynamic
-
+
-
- Employ rebindable syntax
+
+ Disable the monomorphism restriction.dynamic
-
+
-
- Enable lexically-scoped type variables.
- Implied by .
+
+ Enable multi parameter type classes.
+ Implied by .dynamic
-
+
-
- Enable Template Haskell.
- No longer implied by .
+
+ Enable multi-way if-expressions.dynamic
-
+
-
- Enable bang patterns.
+
+ Enable record puns.dynamic
-
+
-
- Enable the C preprocessor.
+
+ Enable support for negative literals.dynamic
-
+
-
- Enable pattern guards.
+
+ Disable support for n+k patterns.dynamic
-
+
-
- Enable view patterns.
+
+ Deprecated, does nothing. nullary (no parameter) type classes are now enabled using .dynamic
-
+
-
- Enable unicode syntax.
+
+ Enable support for 'fractional' integer literals.dynamic
-
+
-
- Allow "#" as a postfix modifier on identifiers.
+
+ Enable overlapping instances.dynamic
-
+
-
- Enable explicit universal quantification.
- Implied by ,
- ,
- ,
-
+
+ Enable overloaded lists.
dynamic
-
-
-
-
- Enable polymorphic components for data constructors.
- dynamic, synonym for
-
+
-
- Enable rank-2 types.
- dynamic, synonym for
-
+
+ Enable overloaded string literals.
+
+ dynamic
+
-
- Enable rank-N types.
+
+ Enable package-qualified imports.dynamic
-
+
-
- Enable impredicative types.
+
+ Enable parallel arrays.
+ Implies .dynamic
-
+
-
- Enable existential quantification.
+
+ Enable parallel list comprehensions.
+ Implied by .dynamic
-
+
-
- Enable kind signatures.
+
+ Enable pattern guards.dynamic
-
+
-
- Enable empty data declarations.
+
+ Enable pattern synonyms.dynamic
-
+
-
- Enable parallel list comprehensions.
+
+ Enable kind polymorphism.
+ Implies .dynamic
-
+
-
- Enable generalised list comprehensions.
- dynamic
-
+
+ Enable polymorphic components for data constructors.
+ dynamic, synonym for
+
-
- Enable monad comprehensions.
+
+ Enable postfix operators.dynamic
-
+
-
- Enable unlifted FFI types.
+
+ Enable quasiquotation.dynamic
-
+
-
- Enable interruptible FFI.
- dynamic
-
+
+ Enable rank-2 types.
+ dynamic, synonym for
+
-
- Enable liberalised type synonyms.
+
+ Enable rank-N types.
+ Implied by .dynamic
-
+
-
- Enable type operators.
+
+ Employ rebindable syntax.
+ Implies .dynamic
-
+
-
- Enable using the keyword type to specify the namespace of
- entries in imports and exports ().
- Implied by and .
+
+ Enable record wildcards.
+ Implies .dynamic
-
+
@@ -1104,34 +1182,30 @@
-
- Enable parallel arrays.
- dynamic
-
-
-
-
- Enable record wildcards.
+
+ (deprecated) Relaxed checking for
+ mutually-recursive polymorphic functions.dynamic
-
+
-
- Enable record puns.
+
+ Enable role annotations.dynamic
-
+
-
- Enable record field disambiguation.
+
+ Enable the Safe Haskell Safe mode.dynamic
-
+
-
- Enable unboxed tuples.
+
+ Enable lexically-scoped type variables.
+ dynamic
-
+
@@ -1140,83 +1214,80 @@
-
- Enable type synonyms in instance heads.
- dynamic
-
-
-
-
- Enable flexible contexts.
+
+ Enable Template Haskell.dynamic
-
+
-
- Enable flexible instances.
- Implies
+
+ Disable support for traditional record syntax (as supported by Haskell 98) C {f = x}dynamic
-
+
-
- Enable constrained class methods.
+
+ Enable generalised list comprehensions.dynamic
-
+
-
- Enable default signatures.
+
+ Enable the Safe Haskell Trustworthy mode.dynamic
-
+
-
- Enable multi parameter type classes.
+
+ Enable tuple sections.dynamic
-
+
-
- Enable nullary (no parameter) type classes.
+
+ Enable type families.
+ Implies ,
+ and .dynamic
-
+
-
- Enable functional dependencies.
+
+ Enable type operators.
+ Implies .dynamic
-
+
-
- Enable package-qualified imports.
+
+ Enable type synonyms in instance heads.
+ Implied by .dynamic
-
+
-
- Enable lambda-case expressions.
+
+ Enable unboxed tuples.dynamic
-
+
-
- Enable multi-way if-expressions.
+
+ Enable undecidable instances.dynamic
-
+
-
- Enable the Safe Haskell Safe mode.
+
+ Enable unicode syntax.dynamic
-
+
-
- Enable the Safe Haskell Trustworthy mode.
+
+ Enable unlifted FFI types.dynamic
-
+
@@ -1225,10 +1296,10 @@
-
- Enable Safe Haskell trusted package requirement for trustworthy modules.
+
+ Enable view patterns.dynamic
-
+
@@ -1516,7 +1587,7 @@
- warn on definitions conflicting with the Applicative-Monad Proposal (AMP)
+ (deprecated) warn on definitions conflicting with the Applicative-Monad Proposal (AMP)dynamic
@@ -1890,6 +1961,43 @@
+
+ =n
+ Set the maximum size of inline array allocations to
+ n bytes (default: 128). GHC
+ will allocate non-pinned arrays of statically known size
+ in the current nursery block if they're no bigger than
+ n bytes, ignoring GC overhead.
+ This value should be quite a bit smaller than the block
+ size (typically: 4096).
+ dynamic
+ -
+
+
+
+
+ =n
+
+ Inline memcpy calls if they would generate no more
+ than n pseudo instructions
+ (default: 32).
+
+ dynamic
+ -
+
+
+
+
+ =n
+
+ Inline memset calls if they would generate no more
+ than n pseudo instructions
+ (default: 32).
+
+ dynamic
+ -
+
+
@@ -2106,6 +2214,12 @@
dynamic-
+
+
+ Always write interface files
+ dynamic
+ -
+ Generate byte-code
@@ -2515,12 +2629,6 @@
dynamic-
-
- option
- pass option to the mangler
- dynamic
- -
- optionpass option to the assembler
@@ -2573,42 +2681,6 @@
-
-
-
-
- (x86 only) give some registers back to the C compiler
- dynamic
- -
-
-
-
-
-
-
-
-
- External core file options
-
-
-
-
-
-
-
- Flag
- Description
- Static/Dynamic
- Reverse
-
-
-
-
-
- Generate .hcr external Core files
- dynamic
- -
-
-
@@ -3023,7 +3095,7 @@
-
+ When compiling with --make, compile N modules in parallel.dynamic-
diff --git a/docs/users_guide/ghci.xml b/docs/users_guide/ghci.xml
index 912ecb25ce7d..729f96f24422 100644
--- a/docs/users_guide/ghci.xml
+++ b/docs/users_guide/ghci.xml
@@ -2432,7 +2432,9 @@ Prelude> :. cmds.ghci
Opens an editor to edit the file
file, or the most recently loaded
- module if file is omitted. The
+ module if file is omitted.
+ If there were errors during the last loading,
+ the cursor will be positioned at the line of the first error. The
editor to invoke is taken from the EDITOR
environment variable, or a default editor on your system if
EDITOR is not set. You can change the
@@ -3294,12 +3296,38 @@ Prelude> :set -fno-warn-incomplete-patterns -XNoMultiParamTypeClasses
Setting options for interactive evaluation only
- GHCi actually maintains two sets of options: one set that
- applies when loading modules, and another set that applies for
- expressions and commands typed at the prompt. The
- :set command modifies both, but there is
+ GHCi actually maintains two sets of options:
+
+
+ The loading options apply when loading modules
+
+
+ The interactive options apply when evaluating expressions and commands typed at the GHCi prompt.
+
+
+The :set command modifies both, but there is
also a :seti command (for "set
- interactive") that affects only the second set.
+ interactive") that affects only the interactive options set.
+
+
+
+ It is often useful to change the interactive options,
+ without having that option apply to loaded modules
+ too. For example
+
+:seti -XMonoLocalBinds
+
+ It would be undesirable if were to
+ apply to loaded modules too: that might cause a compilation error, but
+ more commonly it will cause extra recompilation, because GHC will think
+ that it needs to recompile the module because the flags have changed.
+
+
+
+ If you are setting language options in your .ghci file, it is good practice
+ to use :seti rather than :set,
+ unless you really do want them to apply to all modules you
+ load in GHCi.
@@ -3307,8 +3335,6 @@ Prelude> :set -fno-warn-incomplete-patterns -XNoMultiParamTypeClasses
:set and :seti commands
respectively, with no arguments. For example, in a clean GHCi
session we might see something like this:
-
-
Prelude> :seti
base language is: Haskell2010
@@ -3322,38 +3348,24 @@ other dynamic, non-language, flag settings:
-fimplicit-import-qualified
warning settings:
-
- Note that the option
- is on, because we apply special defaulting rules to
+
+
+The two sets of options are initialised as follows. First, both sets of options
+are initialised as described in .
+Then the interactive options are modified as follows:
+
+
+ The option
+ is enabled, in order to apply special defaulting rules to
expressions typed at the prompt (see ).
-
-
-
- Furthermore, the Monomorphism Restriction is disabled by default in
- GHCi (see ).
-
-
-
- It is often useful to change the language options for expressions typed
- at the prompt only, without having that option apply to loaded modules
- too. For example
-
-:seti -XMonoLocalBinds
-
- It would be undesirable if were to
- apply to loaded modules too: that might cause a compilation error, but
- more commonly it will cause extra recompilation, because GHC will think
- that it needs to recompile the module because the flags have changed.
-
+
-
- It is therefore good practice if you are setting language
- options in your .ghci file, to use
- :seti rather than :set
- unless you really do want them to apply to all modules you
- load in GHCi.
-
+
+ The Monomorphism Restriction is disabled (see ).
+
+
+
diff --git a/docs/users_guide/glasgow_exts.xml b/docs/users_guide/glasgow_exts.xml
index 51174171523a..bfdeea4e5879 100644
--- a/docs/users_guide/glasgow_exts.xml
+++ b/docs/users_guide/glasgow_exts.xml
@@ -480,6 +480,26 @@ Indeed, the bindings can even be recursive.
+
+ Binary integer literals
+
+ Haskell 2010 and Haskell 98 allows for integer literals to
+ be given in decimal, octal (prefixed by
+ 0o or 0O), or
+ hexadecimal notation (prefixed by 0x or
+ 0X).
+
+
+
+ The language extension
+ adds support for expressing integer literals in binary
+ notation with the prefix 0b or
+ 0B. For instance, the binary integer
+ literal 0b11001001 will be desugared into
+ fromInteger 201 when
+ is enabled.
+
+
@@ -856,9 +876,11 @@ y) will not be coalesced.
-Pattern synonyms are enabled by the flag -XPatternSynonyms.
-More information and examples of view patterns can be found on the
-Wiki
+Pattern synonyms are enabled by the flag
+-XPatternSynonyms, which is required for both
+defining them and using them. More information
+and examples of view patterns can be found on the Wiki
page.
@@ -965,65 +987,89 @@ Which enables us to rewrite our functions in a much cleaner style:
In this case, Headx
cannot be used in expressions, only patterns, since it wouldn't
specify a value for the xs on the
-right-hand side.
+right-hand side. We can give an explicit inversion of a pattern
+synonym using the following syntax:
-
-The semantics of a unidirectional pattern synonym declaration and
-usage are as follows:
+
+ pattern Head x <- x:xs where
+ Head x = [x]
+
-
+
+The syntax and semantics of pattern synonyms are elaborated in the
+following subsections.
+See the Wiki
+page for more details.
+
- Syntax:
+Syntax and scoping of pattern synonyms
A pattern synonym declaration can be either unidirectional or
bidirectional. The syntax for unidirectional pattern synonyms is:
-
pattern Name args <- pat
-
and the syntax for bidirectional pattern synonyms is:
-
pattern Name args = pat
+ or
+
+ pattern Name args <- pat where
+ Name args = expr
+ Either prefix or infix syntax can be
+ used.
+
Pattern synonym declarations can only occur in the top level of a
module. In particular, they are not allowed as local
definitions. Currently, they also don't work in GHCi, but that is a
technical restriction that will be lifted in later versions.
+
+ The variables in the left-hand side of the definition are bound by
+ the pattern on the right-hand side. For implicitly bidirectional
+ pattern synonyms, all the variables of the right-hand side must also
+ occur on the left-hand side; also, wildcard patterns and view
+ patterns are not allowed. For unidirectional and
+ explicitly-bidirectional pattern synonyms, there is no restriction
+ on the right-hand side pattern.
+
+
+
+ Pattern synonyms cannot be defined recursively.
+
+
+
+Import and export of pattern synonyms
+
The name of the pattern synonym itself is in the same namespace as
- proper data constructors. Either prefix or infix syntax can be
- used. In export/import specifications, you have to prefix pattern
+ proper data constructors. In an export or import specification,
+ you must prefix pattern
names with the pattern keyword, e.g.:
-
module Example (pattern Single) where
pattern Single x = [x]
-
-
- Scoping:
-
-
- The variables in the left-hand side of the definition are bound by
- the pattern on the right-hand side. For bidirectional pattern
- synonyms, all the variables of the right-hand side must also occur
- on the left-hand side; also, wildcard patterns and view patterns are
- not allowed. For unidirectional pattern synonyms, there is no
- restriction on the right-hand side pattern.
+Without the pattern prefix, Single would
+be interpreted as a type constructor in the export list.
-
- Pattern synonyms cannot be defined recursively.
+You may also use the pattern keyword in an import/export
+specification to import or export an ordinary data constructor. For example:
+
+ import Data.Maybe( pattern Just )
+
+would bring into scope the data constructor Just from the
+Maybe type, without also bringing the type constructor
+Maybe into scope.
+
-
-
- Typing:
+Typing of pattern synonyms
Given a pattern synonym definition of the form
@@ -1098,10 +1144,9 @@ pattern (Show b) => ExNumPat b :: (Num a, Eq a) => T a
ExNumPat :: (Show b, Num a, Eq a) => b -> T t
+
-
-
- Matching:
+Matching of pattern synonyms
A pattern synonym occurrence in a pattern is evaluated by first
@@ -1123,8 +1168,6 @@ f' _ = False
Note that the strictness of f differs from that
of g defined below:
-
-
g [True, True] = True
g _ = False
@@ -1134,9 +1177,8 @@ g _ = False
*Main> g (False:undefined)
False
-
-
+
@@ -1881,7 +1923,8 @@ the comprehension being over an arbitrary monad.
functions (>>=),
(>>), and fail,
are in scope (not the Prelude
- versions). List comprehensions, mdo (), and parallel array
+ versions). List comprehensions, mdo
+ (), and parallel array
comprehensions, are unaffected.
@@ -2389,7 +2432,36 @@ necessary to enable them.
- Package-qualified imports
+Import and export extensions
+
+
+ Hiding things the imported module doesn't export
+
+
+Technically in Haskell 2010 this is illegal:
+
+module A( f ) where
+ f = True
+
+module B where
+ import A hiding( g ) -- A does not export g
+ g = f
+
+The import A hiding( g ) in module B
+is technically an error (Haskell Report, 5.3.1)
+because A does not export g.
+However GHC allows it, in the interests of supporting backward compatibility; for example, a newer version of
+A might export g, and you want B to work
+in either case.
+
+
+The warning -fwarn-dodgy-imports, which is off by default but included with -W,
+warns if you hide something that the imported module does not export.
+
+
+
+
+ Package-qualified importsWith the flag, GHC allows
import declarations to be qualified by the package name that the
@@ -2412,10 +2484,12 @@ import "network" Network.Socket
added mainly so that we can build backwards-compatible versions of
packages when APIs change. It can lead to fragile dependencies in
the common case: modules occasionally move from one package to
- another, rendering any package-qualified imports broken.
-
+ another, rendering any package-qualified imports broken.
+ See also for
+ an alternative way of disambiguating between module names.
+
-
+Safe importsWith the ,
@@ -2433,9 +2507,9 @@ import safe qualified Network.Socket as NS
safely imported. For a description of when a import is
considered safe see
-
+
-
+Explicit namespaces in import/export In an import or export list, such as
@@ -2463,6 +2537,14 @@ disambiguate this case, thus:
The extension
is implied by and (for some reason) by .
+
+In addition, with you can prefix the name of
+a data constructor in an import or export list with the keyword pattern,
+to allow the import or export of a data constructor without its parent type constructor
+(see ).
+
+
+
@@ -3752,8 +3834,16 @@ GHC now allows stand-alone deriving declarations, enabled by
The syntax is identical to that of an ordinary instance declaration apart from (a) the keyword
deriving, and (b) the absence of the where part.
-Note the following points:
+
+
+However, standalone deriving differs from a deriving clause in a number
+of important ways:
+The standalone deriving declaration does not need to be in the
+same module as the data type declaration. (But be aware of the dangers of
+orphan instances ().)
+
+
You must supply an explicit context (in the example the context is (Eq a)),
exactly as you would in an ordinary instance declaration.
@@ -3761,12 +3851,6 @@ exactly as you would in an ordinary instance declaration.
attached to a data type declaration, the context is inferred.)
-
-A deriving instance declaration
-must obey the same rules concerning form and termination as ordinary instance declarations,
-controlled by the same flags; see .
-
-
Unlike a deriving
declaration attached to a data declaration, the instance can be more specific
@@ -3789,6 +3873,8 @@ declaration attached to a data declaration,
GHC does not restrict the form of the data type. Instead, GHC simply generates the appropriate
boilerplate code for the specified class, and typechecks it. If there is a type error, it is
your problem. (GHC will show you the offending code if it has a type error.)
+
+
The merit of this is that you can derive instances for GADTs and other exotic
data types, providing only that the boilerplate code does indeed typecheck. For example:
@@ -3803,7 +3889,24 @@ data type declaration for T,
because T is a GADT, but you can generate
the instance declaration using stand-alone deriving.
+
+The down-side is that,
+if the boilerplate code fails to typecheck, you will get an error message about that
+code, which you did not write. Whereas, with a deriving clause
+the side-conditions are necessarily more conservative, but any error message
+may be more comprehensible.
+
+
+
+
+In other ways, however, a standalone deriving obeys the same rules as ordinary deriving:
+
+
+A deriving instance declaration
+must obey the same rules concerning form and termination as ordinary instance declarations,
+controlled by the same flags; see .
+The stand-alone syntax is generalised for newtypes in exactly the same
@@ -3821,9 +3924,8 @@ GHC always treats the last parameter of the instance
-
-
-Deriving clause for extra classes (Typeable, Data, etc)
+
+Deriving instances of extra classes (Data, etc)
Haskell 98 allows the programmer to add "deriving( Eq, Ord )" to a data type
@@ -3835,27 +3937,6 @@ classes Eq, Ord,
GHC extends this list with several more classes that may be automatically derived:
- With , you can derive instances of the classes
-Typeable, and Data, defined in the library
-modules Data.Typeable and Data.Data respectively.
-
-Since GHC 7.8.1, Typeable is kind-polymorphic (see
-) and can be derived for any datatype and
-type class. Instances for datatypes can be derived by attaching a
-deriving Typeable clause to the datatype declaration, or by
-using standalone deriving (see ).
-Instances for type classes can only be derived using standalone deriving.
-For data families, Typeable should only be derived for the
-uninstantiated family type; each instance will then automatically have a
-Typeable instance too.
-See also .
-
-
-Also since GHC 7.8.1, handwritten (ie. not derived) instances of
-Typeable are forbidden, and will result in an error.
-
-
-
With , you can derive
instances of the classes Generic and
Generic1, defined in GHC.Generics.
@@ -3868,6 +3949,12 @@ the class Functor,
defined in GHC.Base.
+ With , you can derive instances of
+the class Data,
+defined in Data.Data. See for
+deriving Typeable.
+
+
With , you can derive instances of
the class Foldable,
defined in Data.Foldable.
@@ -3875,24 +3962,73 @@ defined in Data.Foldable.
With , you can derive instances of
the class Traversable,
-defined in Data.Traversable.
+defined in Data.Traversable. Since the Traversable
+instance dictates the instances of Functor and
+Foldable, you'll probably want to derive them too, so
+ implies
+ and .
+You can also use a standalone deriving declaration instead
+(see ).
+
+
In each case the appropriate class must be in scope before it
can be mentioned in the deriving clause.
-
-Automatically deriving Typeable instances
+
+Deriving Typeable instances
-
+The class Typeable is very special:
+
+
+Typeable is kind-polymorphic (see
+).
+
+
+
+Only derived instances of Typeable are allowed;
+i.e. handwritten instances are forbidden. This ensures that the
+programmer cannot subvert the type system by writing bogus instances.
+
+
+
+With
+GHC allows you to derive instances of Typeable for data types or newtypes,
+using a deriving clause, or using
+a standalone deriving declaration ().
+
+
+
+With , deriving Typeable for a data
+type (whether via a deriving clause or standalone deriving)
+also derives Typeable for the promoted data constructors ().
+
+
+
+However, using standalone deriving, you can also derive
+a Typeable instance for a data family.
+You may not add a deriving(Typeable) clause to a
+data instance declaration; instead you must use a
+standalone deriving declaration for the data family.
+
+
+
+Using standalone deriving, you can also derive
+a Typeable instance for a type class.
+
+
+
The flag triggers the generation
-of derived Typeable instances for every datatype and type
-class declaration in the module it is used. It will also generate
-Typeable instances for any promoted data constructors
-(). This flag implies
- ().
+of derived Typeable instances for every datatype, data family,
+and type class declaration in the module it is used, unless a manually-specified one is
+already provided.
+This flag implies .
+
+
+
@@ -4018,47 +4154,52 @@ the newtype and its representation.
A more precise specification
-Derived instance declarations are constructed as follows. Consider the
-declaration (after expansion of any type synonyms)
+A derived instance is derived only for declarations of these forms (after expansion of any type synonyms)
- newtype T v1...vn = T' (t vk+1...vn) deriving (c1...cm)
+ newtype T v1..vn = MkT (t vk+1..vn) deriving (C t1..tj)
+ newtype instance T s1..sk vk+1..vn = MkT (t vk+1..vn) deriving (C t1..tj)
-
where
- The ci are partial applications of
- classes of the form C t1'...tj', where the arity of C
+v1..vn are type variables, and t,
+s1..sk, t1..tj are types.
+
+
+ The (C t1..tj) is a partial application of the class C,
+ where the arity of C
is exactly j+1. That is, C lacks exactly one type argument.
- The k is chosen so that ci (T v1...vk) is well-kinded.
+ k is chosen so that C t1..tj (T v1...vk) is well-kinded.
+(Or, in the case of a data instance, so that C t1..tj (T s1..sk) is
+well kinded.)
The type t is an arbitrary type.
- The type variables vk+1...vn do not occur in t,
- nor in the ci, and
+ The type variables vk+1...vn do not occur in the types t,
+ s1..sk, or t1..tj.
- None of the ci is Read, Show,
+ C is not Read, Show,
Typeable, or Data. These classes
should not "look through" the type or its constructor. You can still
derive these classes for a newtype, but it happens in the usual way, not
via this new mechanism.
- It is safe to coerce each of the methods of ci. That is,
- the missing last argument to each of the ci is not used
- at a nominal role in any of the ci's methods.
+ It is safe to coerce each of the methods of C. That is,
+ the missing last argument to C is not used
+ at a nominal role in any of the C's methods.
(See .)
-Then, for each ci, the derived instance
+Then the derived instance is of form
declaration is:
- instance ci t => ci (T v1...vk)
+ instance C t1..tj t => C t1..tj (T v1...vk)
As an example which does not work, consider
@@ -4260,7 +4401,9 @@ We use default signatures to simplify generic programming in GHC
Nullary type classes
-Nullary (no parameter) type classes are enabled with .
+Nullary (no parameter) type classes are enabled with
+; historically, they were enabled with the
+(now deprecated) .
Since there are no available parameters, there can be at most one instance
of a nullary class. A nullary type class might be used to document some assumption
in a type signature (such as reliance on the Riemann hypothesis) or add some
@@ -4881,40 +5024,128 @@ with N.
In general, as discussed in ,
GHC requires that it be unambiguous which instance
declaration
-should be used to resolve a type-class constraint. This behaviour
-can be modified by two flags:
+should be used to resolve a type-class constraint.
+GHC also provides a way to loosen
+the instance resolution, by
+allowing more than one instance to match, provided there is a most
+specific one. Moreover, it can be loosened further, by allowing more than one instance to match
+irrespective of whether there is a most specific one.
+This section gives the details.
+
+
+To control the choice of instance, it is possible to specify the overlap behavior for individual
+instances with a pragma, written immediately after the
+instance keyword. The pragma may be one of:
+{-# OVERLAPPING #-},
+{-# OVERLAPPABLE #-},
+{-# OVERLAPS #-},
+or {-# INCOHERENT #-}.
+
+
+The matching behaviour is also influenced by two module-level language extension flags:
-XOverlappingInstances
and
-XIncoherentInstances
-, as this section discusses. Both these
-flags are dynamic flags, and can be set on a per-module basis, using
-an LANGUAGE pragma if desired ().
+. These flags are now deprecated (since GHC 7.10) in favour of
+the fine-grained per-instance pragmas.
+
+
-The flag instructs GHC to loosen
-the instance resolution described in , by
-allowing more than one instance to match, provided there is a most
-specific one. The flag
-further loosens the resolution, by allowing more than one instance to match,
-irespective of whether there is a most specific one.
+A more precise specification is as follows.
+The willingness to be overlapped or incoherent is a property of
+the instance declaration itself, controlled as follows:
+
+An instance is incoherent if: it has an INCOHERENT pragma; or if the instance has no pragma and it appears in a module compiled with -XIncoherentInstances.
+
+An instance is overlappable if: it has an OVERLAPPABLE or OVERLAPS pragma; or if the instance has no pragma and it appears in a module compiled with -XOverlappingInstances; or if the instance is incoherent.
+
+An instance is overlapping if: it has an OVERLAPPING or OVERLAPS pragma; or if the instance has no pragma and it appears in a module compiled with -XOverlappingInstances; or if the instance is incoherent.
+
+
-For example, consider
+Now suppose that, in some client module, we are searching for an instance of the
+target constraint(C ty1 .. tyn).
+The search works like this.
+
+
+Find all instances I that match the target constraint;
+that is, the target constraint is a substitution instance of I. These
+instance declarations are the candidates.
+
+
+
+Eliminate any candidate IX for which both of the following hold:
+
+
+ There is another candidate IY that is strictly more specific;
+ that is, IY is a substitution instance of IX but not vice versa.
+
+
+ Either IX is overlappable, or IY is
+ overlapping. (This "either/or" design, rather than a "both/and" design,
+ allows a client to deliberately override an instance from a library, without requiring a change to the library.)
+
+
+
+
+
+
+If exactly one non-incoherent candidate remains, select it. If all
+remaining candidates are incoherent, select an arbitrary
+one. Otherwise the search fails (i.e. when more than one surviving candidate is not incoherent).
+
+
+
+If the selected candidate (from the previous step) is incoherent, the search succeeds, returning that candidate.
+
+
+
+If not, find all instances that unify with the target
+constraint, but do not match it.
+Such non-candidate instances might match when the target constraint is further
+instantiated. If all of them are incoherent, the search succeeds, returning the selected candidate;
+if not, the search fails.
+
+
+
+Notice that these rules are not influenced by flag settings in the client module, where
+the instances are used.
+These rules make it possible for a library author to design a library that relies on
+overlapping instances without the client having to know.
+
+
+Errors are reported lazily (when attempting to solve a constraint), rather than eagerly
+(when the instances themselves are defined). Consider, for example
- instance context1 => C Int b where ... -- (A)
- instance context2 => C a Bool where ... -- (B)
- instance context3 => C a [b] where ... -- (C)
- instance context4 => C Int [Int] where ... -- (D)
+ instance C Int b where ..
+ instance C a Bool where ..
+
+These potentially overlap, but GHC will not complain about the instance declarations
+themselves, regardless of flag settings. If we later try to solve the constraint
+(C Int Char) then only the first instance matches, and all is well.
+Similarly with (C Bool Bool). But if we try to solve (C Int Bool),
+both instances match and an error is reported.
+
+
+
+As a more substantial example of the rules in action, consider
+
+ instance {-# OVERLAPPABLE #-} context1 => C Int b where ... -- (A)
+ instance {-# OVERLAPPABLE #-} context2 => C a Bool where ... -- (B)
+ instance {-# OVERLAPPABLE #-} context3 => C a [b] where ... -- (C)
+ instance {-# OVERLAPPING #-} context4 => C Int [Int] where ... -- (D)
-compiled with enabled. The constraint
-C Int [Int] matches instances (A), (C) and (D), but the last
+Now suppose that the type inference
+engine needs to solve the constraint
+C Int [Int]. This constraint matches instances (A), (C) and (D), but the last
is more specific, and hence is chosen.
If (D) did not exist then (A) and (C) would still be matched, but neither is
-most specific. In that case, the program would be rejected even with
-. With
- enabled, it would be accepted and (A) or
+most specific. In that case, the program would be rejected, unless
+ is enabled, in which case it would be accepted and (A) or
(C) would be chosen arbitrarily.
@@ -4924,7 +5155,7 @@ the head of former is a substitution instance of the latter. For example
substituting a:=Int.
-However, GHC is conservative about committing to an overlapping instance. For example:
+GHC is conservative about committing to an overlapping instance. For example:
f :: [b] -> [b]
f x = ...
@@ -5021,56 +5252,6 @@ the program prints
would be to reject module Help
on the grounds that a later instance declaration might overlap the local one.)
-
-The willingness to be overlapped or incoherent is a property of
-the instance declaration itself, controlled by the
-presence or otherwise of the
-and flags when that module is
-being defined. Suppose we are searching for an instance of the
-target constraint(C ty1 .. tyn).
-The search works like this.
-
-
-Find all instances I that match the target constraint;
-that is, the target constraint is a substitution instance of I. These
-instance declarations are the candidates.
-
-
-
-Find all non-candidate instances
-that unify with the target constraint.
-Such non-candidates instances might match when the target constraint is further
-instantiated. If all of them were compiled with
-, proceed; if not, the search fails.
-
-
-
-Eliminate any candidate IX for which both of the following hold:
-
-
-There is another candidate IY that is strictly more specific;
-that is, IY is a substitution instance of IX but not vice versa.
-
-Either IX or IY was compiled with
-.
-
-
-
-
-
-
-If only one candidate remains, pick it.
-Otherwise if all remaining candidates were compiled with
-, pick an arbitrary candidate.
-
-
-
-These rules make it possible for a library author to design a library that relies on
-overlapping instances without the library client having to know.
-
-The flag implies the
- flag, but not vice versa.
-
@@ -5144,21 +5325,30 @@ it explicitly (for example, to give an instance declaration for it), you can imp
from module GHC.Exts.
-Haskell's defaulting mechanism is extended to cover string literals, when is specified.
+Haskell's defaulting mechanism (Haskell Report, Section 4.3.4)
+is extended to cover string literals, when is specified.
Specifically:
-Each type in a default declaration must be an
+Each type in a default declaration must be an
instance of Numor of IsString.
-The standard defaulting rule (Haskell Report, Section 4.3.4)
+If no default declaration is given, then it is just as if the module
+contained the declaration default( Integer, Double, String).
+
+
+
+The standard defaulting rule
is extended thus: defaulting applies when all the unresolved constraints involve standard classes
orIsString; and at least one is a numeric class
orIsString.
+So, for example, the expression length "foo" will give rise
+to an ambiguous use of IsString a0 which, because of the above
+rules, will default to String.
A small example:
@@ -5260,7 +5450,7 @@ class IsList l where
fromListN _ = fromList
-The FromList class and its methods are intended to be
+The IsList class and its methods are intended to be
used in conjunction with the extension.
The type function
@@ -5292,32 +5482,32 @@ of IsList, so that list notation becomes
useful for completely new data types.
Here are several example instances:
-instance FromList [a] where
+instance IsList [a] where
type Item [a] = a
fromList = id
toList = id
-instance (Ord a) => FromList (Set a) where
+instance (Ord a) => IsList (Set a) where
type Item (Set a) = a
fromList = Set.fromList
toList = Set.toList
-instance (Ord k) => FromList (Map k v) where
+instance (Ord k) => IsList (Map k v) where
type Item (Map k v) = (k,v)
fromList = Map.fromList
toList = Map.toList
-instance FromList (IntMap v) where
+instance IsList (IntMap v) where
type Item (IntMap v) = (Int,v)
fromList = IntMap.fromList
toList = IntMap.toList
-instance FromList Text where
+instance IsList Text where
type Item Text = Char
fromList = Text.pack
toList = Text.unpack
-instance FromList (Vector a) where
+instance IsList (Vector a) where
type Item (Vector a) = a
fromList = Vector.fromList
fromListN = Vector.fromListN
@@ -5885,28 +6075,39 @@ instance (GMapKey a, GMapKey b) => GMapKey (Either a b) where
data GMap (Either a b) v = GMapEither (GMap a v) (GMap b v)
...
-instance (Eq (Elem [e])) => Collects ([e]) where
+instance Eq (Elem [e]) => Collects [e] where
type Elem [e] = e
...
- The most important point about associated family instances is that the
- type indexes corresponding to class parameters must be identical to
- the type given in the instance head; here this is the first argument
- of GMap, namely Either a b,
- which coincides with the only class parameter.
-
-
- Instances for an associated family can only appear as part of
- instance declarations of the class in which the family was declared -
- just as with the equations of the methods of a class. Also in
- correspondence to how methods are handled, declarations of associated
- types can be omitted in class instances. If an associated family
- instance is omitted, the corresponding instance type is not inhabited;
+Note the following points:
+
+
+ The type indexes corresponding to class parameters must have precisely the same shape
+ the type given in the instance head. To have the same "shape" means that
+ the two types are identical modulo renaming of type variables. For example:
+
+instance Eq (Elem [e]) => Collects [e] where
+ -- Choose one of the following alternatives:
+ type Elem [e] = e -- OK
+ type Elem [x] = x -- OK
+ type Elem x = x -- BAD; shape of 'x' is different to '[e]'
+ type Elem [Maybe x] = x -- BAD: shape of '[Maybe x]' is different to '[e]'
+
+
+
+ An instance for an associated family can only appear as part of
+ an instance declaration of the class in which the family was declared,
+ just as with the equations of the methods of a class.
+
+
+ The instance for an associated type can be omitted in class instances. In that case,
+ unless there is a default instance (see ),
+ the corresponding instance type is not inhabited;
i.e., only diverging expressions, such
as undefined, can assume the type.
-
-
- Although it is unusual, there can be multiple
+
+
+ Although it is unusual, there (currently) can be multiple
instances for an associated family in a single instance declaration.
For example, this is legitimate:
@@ -5920,8 +6121,10 @@ instance GMapKey Flob where
Since you cannot give any subsequent instances for
(GMap Flob ...), this facility is most useful when
the free indexed parameter is of a kind with a finite number of alternatives
- (unlike *).
-
+ (unlike *). WARNING: this facility may be withdrawn in the future.
+
+
+
@@ -5939,22 +6142,50 @@ class IsBoolMap v where
instance IsBoolMap [(Int, Bool)] where
lookupKey = lookup
-The instance keyword is optional.
-
+In an instance declaration for the class, if no explicit
+type instance declaration is given for the associated type, the default declaration
+is used instead, just as with default class methods.
+
-There can also be multiple defaults for a single type, as long as they do not
-overlap:
+Note the following points:
+
+
+ The instance keyword is optional.
+
+
+ There can be at most one default declaration for an associated type synonym.
+
+
+ A default declaration is not permitted for an associated
+ data type.
+
+
+ The default declaration must mention only type variables on the left hand side,
+ and the right hand side must mention only type variables bound on the left hand side.
+ However, unlike the associated type family declaration itself,
+ the type variables of the default instance are independent of those of the parent class.
+
+
+Here are some examples:
-class C a where
- type F a b
- type F a Int = Bool
- type F a Bool = Int
+ class C a where
+ type F1 a :: *
+ type instance F1 a = [a] -- OK
+ type instance F1 a = a->a -- BAD; only one default instance is allowed
+
+ type F2 b a -- OK; note the family has more type
+ -- variables than the class
+ type instance F2 c d = c->d -- OK; you don't have to use 'a' in the type instance
+
+ type F3 a
+ type F3 [b] = b -- BAD; only type variables allowed on the LHS
+
+ type F4 a
+ type F4 b = a -- BAD; 'a' is not in scope in the RHS
+
-A default declaration is not permitted for an associated
-data type.
-
-
+
Scoping of class parameters
@@ -6296,11 +6527,11 @@ data T m a = MkT (m a) (T Maybe (m a))
The recursive use of T forced the second argument to have kind *.
However, just as in type inference, you can achieve polymorphic recursion by giving a
-complete kind signature for T. The way to give
-a complete kind signature for a data type is to use a GADT-style declaration with an
-explicit kind signature thus:
+complete kind signature for T. A complete
+kind signature is present when all argument kinds and the result kind are known, without
+any need for inference. For example:
-data T :: (k -> *) -> k -> * where
+data T (m :: k -> *) :: k -> * where
MkT :: m a -> T Maybe (m a) -> T m a
The complete user-supplied kind signature specifies the polymorphic kind for T,
@@ -6312,26 +6543,41 @@ In particular, the recursive use of T is at kind *
What exactly is considered to be a "complete user-supplied kind signature" for a type constructor?
These are the forms:
-
-A GADT-style data type declaration, with an explicit "::" in the header.
-For example:
+For a datatype, every type variable must be annotated with a kind. In a
+GADT-style declaration, there may also be a kind signature (with a top-level
+:: in the header), but the presence or absence of this annotation
+does not affect whether or not the declaration has a complete signature.
data T1 :: (k -> *) -> k -> * where ... -- Yes T1 :: forall k. (k->*) -> k -> *
data T2 (a :: k -> *) :: k -> * where ... -- Yes T2 :: forall k. (k->*) -> k -> *
data T3 (a :: k -> *) (b :: k) :: * where ... -- Yes T3 :: forall k. (k->*) -> k -> *
-data T4 a (b :: k) :: * where ... -- YES T4 :: forall k. * -> k -> *
+data T4 (a :: k -> *) (b :: k) where ... -- Yes T4 :: forall k. (k->*) -> k -> *
-data T5 a b where ... -- NO kind is inferred
-data T4 (a :: k -> *) (b :: k) where ... -- NO kind is inferred
-
-It makes no difference where you put the "::" but it must be there.
-You cannot give a complete kind signature using a Haskell-98-style data type declaration;
-you must use GADT syntax.
+data T5 a (b :: k) :: * where ... -- NO kind is inferred
+data T6 a b where ... -- NO kind is inferred
+
+
+
+
+For a class, every type variable must be annotated with a kind.
+
+For a type synonym, every type variable and the result type must all be annotated
+with kinds.
+
+type S1 (a :: k) = (a :: k) -- Yes S1 :: forall k. k -> k
+type S2 (a :: k) = a -- No kind is inferred
+type S3 (a :: k) = Proxy a -- No kind is inferred
+
+Note that in S2 and S3, the kind of the
+right-hand side is rather apparent, but it is still not considered to have a complete
+signature -- no inference can be done before detecting the signature.
+
An open type or data family declaration always has a
-complete user-specified kind signature; no "::" is required:
+complete user-specified kind signature; un-annotated type variables default to
+kind *.
data family D1 a -- D1 :: * -> *
data family D2 (a :: k) -- D2 :: forall k. k -> *
@@ -6346,10 +6592,12 @@ variable annotation from the class declaration. It keeps its polymorphic kind
in the associated type declaration. The variable b, however,
gets defaulted to *.
+
+
+A closed type family has a complete signature when all of its type variables
+are annotated and a return kind (with a top-level ::) is supplied.
+
-In a complete user-specified kind signature, any un-decorated type variable to the
-left of the "::" is considered to have kind "*".
-If you want kind polymorphism, specify a kind variable.
@@ -6359,31 +6607,33 @@ If you want kind polymorphism, specify a kind variable.
Although all open type families are considered to have a complete
user-specified kind signature, we can relax this condition for closed type
families, where we have equations on which to perform kind inference. GHC will
-infer a kind for any type variable in a closed type family when that kind is
-never used in pattern-matching. If you want a kind variable to be used in
-pattern-matching, you must declare it explicitly.
-
-
-
-Here are some examples (assuming -XDataKinds is enabled):
-
-type family Not a where -- Not :: Bool -> Bool
- Not False = True
- Not True = False
-
-type family F a where -- ERROR: requires pattern-matching on a kind variable
- F Int = Bool
- F Maybe = Char
-
-type family G (a :: k) where -- G :: k -> *
- G Int = Bool
- G Maybe = Char
-
-type family SafeHead where -- SafeHead :: [k] -> Maybe k
- SafeHead '[] = Nothing -- note that k is not required for pattern-matching
- SafeHead (h ': t) = Just h
-
-
+infer kinds for the arguments and result types of a closed type family.
+
+GHC supports kind-indexed type families, where the
+family matches both on the kind and type. GHC will not infer
+this behaviour without a complete user-supplied kind signature, as doing so would
+sometimes infer non-principal types.
+
+For example:
+
+type family F1 a where
+ F1 True = False
+ F1 False = True
+ F1 x = x
+-- F1 fails to compile: kind-indexing is not inferred
+
+type family F2 (a :: k) where
+ F2 True = False
+ F2 False = True
+ F2 x = x
+-- F2 fails to compile: no complete signature
+
+type family F3 (a :: k) :: k where
+ F3 True = False
+ F3 False = True
+ F3 x = x
+-- OK
+
@@ -6569,14 +6819,78 @@ Note that this requires .
-
-Promoted Literals
+
+Promoting existential data constructors
+
+Note that we do promote existential data constructors that are otherwise suitable.
+For example, consider the following:
+
+data Ex :: * where
+ MkEx :: forall a. a -> Ex
+
+Both the type Ex and the data constructor MkEx
+get promoted, with the polymorphic kind 'MkEx :: forall k. k -> Ex.
+Somewhat surprisingly, you can write a type family to extract the member
+of a type-level existential:
+
+type family UnEx (ex :: Ex) :: k
+type instance UnEx (MkEx x) = x
+
+At first blush, UnEx seems poorly-kinded. The return kind
+k is not mentioned in the arguments, and thus it would seem
+that an instance would have to return a member of k
+for any k. However, this is not the
+case. The type family UnEx is a kind-indexed type family.
+The return kind k is an implicit parameter to UnEx.
+The elaborated definitions are as follows:
+
+type family UnEx (k :: BOX) (ex :: Ex) :: k
+type instance UnEx k (MkEx k x) = x
+
+Thus, the instance triggers only when the implicit parameter to UnEx
+matches the implicit parameter to MkEx. Because k
+is actually a parameter to UnEx, the kind is not escaping the
+existential, and the above code is valid.
+
+
+
+See also Trac #7347.
+
+
+
+
+Promoting type operators
+
+Type operators are not promoted to the kind level. Why not? Because
+* is a kind, parsed the way identifiers are. Thus, if a programmer
+tried to write Either * Bool, would it be Either
+applied to * and Bool? Or would it be
+* applied to Either and Bool?
+To avoid this quagmire, we simply forbid promoting type operators to the kind level.
+
+
+
+
+
+
+
+Type-Level Literals
-Numeric and string literals are promoted to the type level, giving convenient
-access to a large number of predefined type-level constants. Numeric literals
-are of kind Nat, while string literals are of kind
-Symbol. These kinds are defined in the module
-GHC.TypeLits.
+GHC supports numeric and string literals at the type level, giving convenient
+access to a large number of predefined type-level constants.
+Numeric literals are of kind Nat, while string literals
+are of kind Symbol.
+This feature is enabled by the XDataKinds
+language extension.
+
+
+
+The kinds of the literals and all other low-level operations for this feature
+are defined in module GHC.TypeLits. Note that the module
+defines some type-level operators that clash with their value-level
+counterparts (e.g. (+)). Import and export declarations
+referring to these operators require an explicit namespace
+annotation (see ).
@@ -6611,44 +6925,84 @@ instance Has Point "y" Int where from (Point _ y) _ = y
example = from (Point 1 2) (Get :: Label "x")
-
-
-Promoting existential data constructors
+
+Runtime Values for Type-Level Literals
-Note that we do promote existential data constructors that are otherwise suitable.
-For example, consider the following:
+Sometimes it is useful to access the value-level literal associated with
+a type-level literal. This is done with the functions
+natVal and symbolVal. For example:
-data Ex :: * where
- MkEx :: forall a. a -> Ex
+GHC.TypeLits> natVal (Proxy :: Proxy 2)
+2
-Both the type Ex and the data constructor MkEx
-get promoted, with the polymorphic kind 'MkEx :: forall k. k -> Ex.
-Somewhat surprisingly, you can write a type family to extract the member
-of a type-level existential:
+These functions are overloaded because they need to return a different
+result, depending on the type at which they are instantiated.
-type family UnEx (ex :: Ex) :: k
-type instance UnEx (MkEx x) = x
+natVal :: KnownNat n => proxy n -> Integer
+
+-- instance KnownNat 0
+-- instance KnownNat 1
+-- instance KnownNat 2
+-- ...
-At first blush, UnEx seems poorly-kinded. The return kind
-k is not mentioned in the arguments, and thus it would seem
-that an instance would have to return a member of k
-for anyk. However, this is not the
-case. The type family UnEx is a kind-indexed type family.
-The return kind k is an implicit parameter to UnEx.
-The elaborated definitions are as follows:
+GHC discharges the constraint as soon as it knows what concrete
+type-level literal is being used in the program. Note that this works
+only for literals and not arbitrary type expressions.
+For example, a constraint of the form KnownNat (a + b)
+will not be simplified to
+(KnownNat a, KnownNat b); instead, GHC will keep the
+constraint as is, until it can simplify a + b to
+a constant value.
+
+
+
+
+It is also possible to convert a run-time integer or string value to
+the corresponding type-level literal. Of course, the resulting type
+literal will be unknown at compile-time, so it is hidden in an existential
+type. The conversion may be performed using someNatVal
+for integers and someSymbolVal for strings:
-type family UnEx (k :: BOX) (ex :: Ex) :: k
-type instance UnEx k (MkEx k x) = x
+someNatVal :: Integer -> Maybe SomeNat
+SomeNat :: KnownNat n => Proxy n -> SomeNat
-Thus, the instance triggers only when the implicit parameter to UnEx
-matches the implicit parameter to MkEx. Because k
-is actually a parameter to UnEx, the kind is not escaping the
-existential, and the above code is valid.
+The operations on strings are similar.
+
+Computing With Type-Level Naturals
-See also Trac #7347.
+GHC 7.8 can evaluate arithmetic expressions involving type-level natural
+numbers. Such expressions may be constructed using the type-families
+(+), (*), (^) for addition, multiplication,
+and exponentiation. Numbers may be compared using (<=?),
+which returns a promoted boolean value, or (<=), which
+compares numbers as a constraint. For example:
+
+GHC.TypeLits> natVal (Proxy :: Proxy (2 + 3))
+5
+
+
+
+At present, GHC is quite limited in its reasoning about arithmetic:
+it will only evaluate the arithmetic type functions and compare the results---
+in the same way that it does for any other type function. In particular,
+it does not know more general facts about arithmetic, such as the commutativity
+and associativity of (+), for example.
+
+
+
+However, it is possible to perform a bit of "backwards" evaluation.
+For example, here is how we could get GHC to compute arbitrary logarithms
+at the type level:
+
+lg :: Proxy base -> Proxy (base ^ pow) -> Proxy pow
+lg _ _ = Proxy
+
+GHC.TypeLits> natVal (lg (Proxy :: Proxy 2) (Proxy :: Proxy 8))
+3
+
@@ -6696,6 +7050,21 @@ class (F a ~ b) => C a b where
with the class head. Method signatures are not affected by that
process.
+
+
+ The Coercible constraint
+
+ The constraint Coercible t1 t2 is similar to t1 ~
+ t2, but denotes representational equality between
+ t1 and t2 in the sense of Roles
+ (). It is exported by
+ Data.Coerce,
+ which also contains the documentation. More details and discussion can be found in
+ the paper
+ Safe Coercions.
+
+
+
@@ -7862,7 +8231,8 @@ scope over the methods defined in the where part. For exampl
4.5.5
of the Haskell Report)
can be completely switched off by
-.
+. Since GHC 7.8.1, the monomorphism
+restriction is switched off by default in GHCi's interactive options (see ).
@@ -7935,12 +8305,30 @@ pattern binding must have the same context. For example, this is fine:
An ML-style language usually generalises the type of any let-bound or where-bound variable,
so that it is as polymorphic as possible.
-With the flag GHC implements a slightly more conservative policy:
-it generalises only "closed" bindings.
-A binding is considered "closed" if either
+With the flag GHC implements a slightly more conservative policy,
+using the following rules:
-It is one of the top-level bindings of a module, or
-Its free variables are all themselves closed
+
+ A variable is closed if and only if
+
+ the variable is let-bound
+ one of the following holds:
+
+ the variable has an explicit type signature that has no free type variables, or
+ its binding group is fully generalised (see next bullet)
+
+
+
+
+
+
+ A binding group is fully generalised if and only if
+
+ each of its free variables is either imported or closed, and
+ the binding is not affected by the monomorphism restriction
+ (Haskell Report, Section 4.5.5)
+
+
For example, consider
@@ -7949,15 +8337,18 @@ g x = let h y = f y * 2
k z = z+x
in h x + k x
-Here f and g are closed because they are bound at top level.
-Also h is closed because its only free variable f is closed.
-But k is not closed because it mentions x which is locally bound.
-Another way to think of it is this: all closed bindings could be defined at top level.
-(In the example, we could move h to top level.)
-
-All of this applies only to bindings that lack an explicit type signature, so that GHC has to
-infer its type. If you supply a type signature, then that fixes type of the binding, end of story.
-
+Here f is generalised because it has no free variables; and its binding group
+is unaffected by the monomorphism restriction; and hence f is closed.
+The same reasoning applies to g, except that it has one closed free variable, namely f.
+Similarly h is closed, even though it is not bound at top level,
+because its only free variable f is closed.
+But k is not closed, because it mentions x which is not closed (because it is not let-bound).
+
+
+Notice that a top-level binding that is affected by the monomorphism restriction is not closed, and hence may
+in turn prevent generalisation of bindings that mention it.
+
+
The rationale for this more conservative strategy is given in
the papers "Let should not be generalised" and "Modular type inference with local assumptions", and
a related blog post.
@@ -7978,23 +8369,22 @@ with but type inference becomes less predica
, which is enabled by default.
-The goal of the typed holes warning is not to change the type system, but to help with writing Haskell
-code. Type holes can be used to obtain extra information from the type checker, which might otherwise be hard
-to get.
-Normally, the type checker is used to decide if a module is well typed or not. Using GHCi,
-users can inspect the (inferred) type signatures of all top-level bindings. However, determining the
-type of a single term is still hard. Yet while writing code, it could be helpful to know the type of
-the term you're about to write.
-
-
-
-This extension allows special placeholders, written with a leading underscore (e.g. "_",
+This option allows special placeholders, written with a leading underscore (e.g. "_",
"_foo", "_bar"), to be used as an expression.
During compilation these holes will generate an error message describing what type is expected there,
information about the origin of any free type variables, and a list of local bindings
that might help fill the hole with actual code.
+
+The goal of the typed holes warning is not to change the type system, but to help with writing Haskell
+code. Typed holes can be used to obtain extra information from the type checker, which might otherwise be hard
+to get.
+Normally, using GHCi, users can inspect the (inferred) type signatures of all top-level bindings.
+However, this method is less convenient with terms which are not defined on top-level or
+inside complex expressions. Holes allow you to check the type of the term you're about to write.
+
+
Holes work together well with deferring type errors to runtime:
+with -fdefer-type-errors, the error from a hole is also deferred, effectively making the hole
@@ -8023,15 +8413,15 @@ hole.hs:2:7:
-Multiple type holes can be used to find common type variables between expressions. For example:
+Multiple typed holes can be used to find common type variables between expressions. For example:
sum :: [Int] -> Int
-sum xx = foldr _f _z xs
+sum xs = foldr _f _z xs
Shows:
holes.hs:2:15:
- Found hole `_f' with type: Int-> Int -> Int
+ Found hole `_f' with type: Int -> Int -> Int
In the first argument of `foldr', namely `_'
In the expression: foldr _a _b _c
In an equation for `sum': sum x = foldr _a _b _c
@@ -8070,7 +8460,6 @@ unbound.hs:1:13:
In the second argument of `(:)', namely `_x'
In the expression: _x : _x
In an equation for `cons': cons = _x : _x
-Failed, modules loaded: none.
This ensures that an unbound identifier is never reported with a too polymorphic type, like
forall a. a, when used multiple times for types that can not be unified.
@@ -8212,9 +8601,7 @@ Wiki page.
constructions. You need to use the flag
- to switch these syntactic extensions on
- ( is no longer implied by
- ).
+ to switch these syntactic extensions on.
@@ -10436,6 +10823,23 @@ data T = T {-# NOUNPACK #-} !(Int,Int)
+
+OVERLAPPING, OVERLAPPABLE, OVERLAPS, and INCOHERENT pragmas
+
+The pragmas
+ OVERLAPPING,
+ OVERLAPPABLE,
+ OVERLAPS,
+ INCOHERENT are used to specify the overlap
+behavior for individual instances, as described in Section
+. The pragmas are written immediately
+after the instance keyword, like this:
+
+
+instance {-# OVERLAPPING #-} C t where ...
+
+
+
@@ -10712,8 +11116,8 @@ not be substituted, and the rule would not fire.
-
-How rules interact with INLINE/NOINLINE and CONLIKE pragmas
+
+How rules interact with INLINE/NOINLINE pragmas
Ordinary inlining happens at the same time as rule rewriting, which may lead to unexpected
@@ -10739,7 +11143,14 @@ would have been a better chance that f's RULE might fire.
The way to get predictable behaviour is to use a NOINLINE
pragma, or an INLINE[phase] pragma, on f, to ensure
that it is not inlined until its RULEs have had a chance to fire.
+The warning flag (see )
+warns about this situation.
+
+
+
+How rules interact with CONLIKE pragmas
+
GHC is very cautious about duplicating work. For example, consider
@@ -11084,69 +11495,6 @@ program even if fusion doesn't happen. More rules in GHC/List.lhs
-
- CORE pragma
-
- CORE pragma
- pragma, CORE
- core, annotation
-
-
- The external core format supports Note annotations;
- the CORE pragma gives a way to specify what these
- should be in your Haskell source code. Syntactically, core
- annotations are attached to expressions and take a Haskell string
- literal as an argument. The following function definition shows an
- example:
-
-
-f x = ({-# CORE "foo" #-} show) ({-# CORE "bar" #-} x)
-
-
- Semantically, this is equivalent to:
-
-
-g x = show x
-
-
-
-
- However, when external core is generated (via
- ), there will be Notes attached to the
- expressions show and x.
- The core function declaration for f is:
-
-
-
- f :: %forall a . GHCziShow.ZCTShow a ->
- a -> GHCziBase.ZMZN GHCziBase.Char =
- \ @ a (zddShow::GHCziShow.ZCTShow a) (eta::a) ->
- (%note "foo"
- %case zddShow %of (tpl::GHCziShow.ZCTShow a)
- {GHCziShow.ZCDShow
- (tpl1::GHCziBase.Int ->
- a ->
- GHCziBase.ZMZN GHCziBase.Char -> GHCziBase.ZMZN GHCziBase.Cha
-r)
- (tpl2::a -> GHCziBase.ZMZN GHCziBase.Char)
- (tpl3::GHCziBase.ZMZN a ->
- GHCziBase.ZMZN GHCziBase.Char -> GHCziBase.ZMZN GHCziBase.Cha
-r) ->
- tpl2})
- (%note "bar"
- eta);
-
-
-
- Here, we can see that the function show (which
- has been expanded out to a case expression over the Show dictionary)
- has a %note attached to it, as does the
- expression eta (which used to be called
- x).
-
-
-
-
@@ -11440,7 +11788,7 @@ described in Generative
type abstraction and type-level computation, published at POPL 2011.
-
+Nominal, Representational, and PhantomThe goal of the roles system is to track when two types have the same
@@ -11497,7 +11845,7 @@ are unrelated.
-
+Role inference
@@ -11551,7 +11899,7 @@ but role nominal for b.
-
+Role annotations
-XRoleAnnotations
@@ -11601,14 +11949,18 @@ to be at role nominal. This would be done with a declaration
Role annotations can also be used should a programmer wish to write
-a class with a representational (or phantom) role.
+a class with a representational (or phantom) role. However, as a class
+with non-nominal roles can quickly lead to class instance incoherence,
+it is necessary to also specify
+to allow non-nominal roles for classes.
The other place where role annotations may be necessary are in
hs-boot files (), where
the right-hand sides of definitions can be omitted. As usual, the
types/classes declared in an hs-boot file must match up
with the definitions in the hs file, including down to the
-roles. The default role is representational in hs-boot files,
+roles. The default role for datatypes
+is representational in hs-boot files,
corresponding to the common use case.
@@ -11637,7 +11989,7 @@ Here are some examples:
type role T4 nominal
data T4 a = MkT4 (a Int) -- OK, but nominal is higher than necessary
- type role C representational _
+ type role C representational _ -- OK, with -XIncoherentInstances
class C a b where ... -- OK, b will get a nominal role
type role X nominal
diff --git a/docs/users_guide/gone_wrong.xml b/docs/users_guide/gone_wrong.xml
index 114b06cfd690..bb5fcb0d4e77 100644
--- a/docs/users_guide/gone_wrong.xml
+++ b/docs/users_guide/gone_wrong.xml
@@ -146,7 +146,7 @@
must be re-compiled.
A useful option to alert you when interfaces change is
- -hi-diffs
+ -ddump-hi-diffs
option. It will run
diff on the changed interface file,
before and after, when applicable.
@@ -167,7 +167,7 @@
% rm *.o # scrub your object files
-% make my_prog # re-make your program; use -hi-diffs to highlight changes;
+% make my_prog # re-make your program; use -ddump-hi-diffs to highlight changes;
# as mentioned above, use -dcore-lint to be more paranoid
% ./my_prog ... # retry...
diff --git a/docs/users_guide/packages.xml b/docs/users_guide/packages.xml
index 3f2dd97aa535..ee29cb1c2f1d 100644
--- a/docs/users_guide/packages.xml
+++ b/docs/users_guide/packages.xml
@@ -88,7 +88,11 @@ $ ghc-pkg list
to expose a hidden package or hide an exposed one. Only modules
from exposed packages may be imported by your Haskell code; if
you try to import a module from a hidden package, GHC will emit
- an error message.
+ an error message. If there are multiple exposed versions of a package,
+ GHC will prefer the latest one. Additionally, some packages may be
+ broken: that is, they are missing from the package database, or one of
+ their dependencies are broken; in this case; these packages are excluded
+ from the default set of packages.
@@ -137,8 +141,11 @@ exposed-modules: Network.BSD,
(e.g. network-1.0) or the version
number can be omitted if there is only one version of the
package installed. If there are multiple versions
- of P installed, then all other
- versions will become hidden.
+ of P installed and
+ was not specified, then all
+ other versions will become hidden.
+ supports thinning and renaming described in .
The
option also causes package P to
@@ -183,10 +190,12 @@ exposed-modules: Network.BSD,
Exposes a package like , but the
- package is named by its ID rather than by name. This is a
+ package is named by its installed package ID rather than by name. This is a
more robust way to name packages, and can be used to
select packages that would otherwise be shadowed. Cabal
passes flags to GHC.
+ supports thinning and renaming
+ described in .
@@ -258,19 +267,15 @@ exposed-modules: Network.BSD,
- foo
-
+ foo
+ Tells GHC that the module being compiled forms part of
- package foo.
+ package key foo; internally, these
+ keys are used to determine type equality and linker symbols.
If this flag is omitted (a very common case) then the
default package main is assumed.
- Note: the argument to
- should be the full
- package name-version for the package.
- For example:
- -package mypkg-1.2.
@@ -328,7 +333,7 @@ exposed-modules: Network.BSD,
Every complete Haskell program must define main in
module Main
- in package main. (Omitting the flag compiles
+ in package main. (Omitting the flag compiles
code for package main.) Failure to do so leads to a somewhat obscure
link-time error of the form:
@@ -367,6 +372,52 @@ _ZCMain_main_closure
name.
+
+ Thinning and renaming modules
+
+ When incorporating packages from multiple sources, you may end up
+ in a situation where multiple packages publish modules with the same name.
+ Previously, the only way to distinguish between these modules was to
+ use . However, since GHC 7.10,
+ the flags (and their variants) have been extended
+ to allow a user to explicitly control what modules a package brings into
+ scope, by analogy to the import lists that users can attach to module imports.
+
+
+
+ The basic syntax is that instead of specifying a package name P to the package
+ flag -package, instead we specify both a package name and a
+ parenthesized, comma-separated list of module names to import. For example,
+ -package "base (Data.List, Data.Bool)" makes only
+ Data.List and Data.Bool visible from
+ package base.
+ We also support renaming of modules, in case you need to refer to both modules
+ simultaneously; this is supported by writing OldModName as
+ NewModName, e.g. -package "base (Data.Bool as
+ Bool)". It's important to specify quotes
+ so that your shell passes the package name and thinning/renaming list as a
+ single argument to GHC.
+
+ Package imports with thinning/renaming do not hide other versions of the
+ package: e.g. if containers-0.9 is already exposed, -package
+ "containers-0.8 (Data.List as ListV8)" will only add an additional
+ binding to the environment. Similarly, -package "base (Data.Bool as
+ Bool)" -package "base (Data.List as List)" is equivalent to
+ -package "base (Data.Bool as Bool, Data.List as List)".
+ Literal names must refer to modules defined by the original package, so for
+ example -package "base (Data.Bool as Bool, Bool as Baz)" is
+ invalid unless there was a Bool module defined in the
+ original package. Hiding a package also clears all of its renamings.
+
+
+ You can use renaming to provide an alternate prelude, e.g.
+ -hide-all-packages -package "basic-prelude (BasicPrelude as
+ Prelude)", in lieu of the NoImplicitPrelude extension.
+
+
+
+
Package Databases
@@ -528,12 +579,11 @@ _ZCMain_main_closure
- Package IDs, dependencies, and broken packages
+ Installed package IDs, dependencies, and broken packagesEach installed package has a unique identifier (the
- “installed package ID”, or just “package
- ID” for short) , which distinguishes it from all other
- installed packages on the system. To see the package IDs
+ “installed package ID”), which distinguishes it from all other
+ installed packages on the system. To see the installed package IDs
associated with each installed package, use ghc-pkg
list -v:
@@ -549,10 +599,10 @@ using cache: /usr/lib/ghc-6.12.1/package.conf.d/package.cache
- The string in parentheses after the package name is the package
+ The string in parentheses after the package name is the installed package
ID: it normally begins with the package name and version, and
ends in a hash string derived from the compiled package.
- Dependencies between packages are expressed in terms of package
+ Dependencies between packages are expressed in terms of installed package
IDs, rather than just packages and versions. For example, take
a look at the dependencies of the haskell98
package:
@@ -570,14 +620,14 @@ depends: array-0.2.0.1-9cbf76a576b6ee9c1f880cf171a0928d
- The purpose of the package ID is to detect problems caused by
+ The purpose of the installed package ID is to detect problems caused by
re-installing a package without also recompiling the packages
that depend on it. Recompiling dependencies is necessary,
because the newly compiled package may have a different ABI
(Application Binary Interface) than the previous version, even
if both packages were built from the same source code using the
- same compiler. With package IDs, a recompiled
- package will have a different package ID from the previous
+ same compiler. With installed package IDs, a recompiled
+ package will have a different installed package ID from the previous
version, so packages that depended on the previous version are
now orphaned - one of their dependencies is not satisfied.
Packages that are broken in this way are shown in
@@ -691,7 +741,9 @@ haskell98-1.0.1.0
package; the specified action will be applied to all the matching
packages. A package specifier that matches all version of the package
can also be written pkg-*,
- to make it clearer that multiple packages are being matched.
+ to make it clearer that multiple packages are being matched. To match
+ against the installed package ID instead of just package name and version,
+ pass the flag.
@@ -1047,8 +1099,25 @@ ghc-pkg dot | tred | dot -Tpdf >pkgs.pdf
Output the ghc-pkg version number.
-
+
+
+
+
+
+
+
+
+ Causes ghc-pkg to interpret arguments
+ as installed package IDs (e.g., an identifier like
+ unix-2.3.1.0-de7803f1a8cd88d2161b29b083c94240
+ ). This is useful if providing just the package
+ name and version is ambiguous (in old versions of GHC, this
+ was guaranteed to be unique, but this invariant no longer
+ necessarily holds).
+
+
+
@@ -1152,8 +1221,8 @@ ghc-pkg dot | tred | dot -Tpdf >pkgs.pdf
To compile a module which is to be part of a new package,
- use the -package-name option ().
- Failure to use the -package-name option
+ use the -this-package-key option ().
+ Failure to use the -this-package-key option
when compiling a package will probably result in disaster, but
you will only discover later when you attempt to import modules
from the package. At this point GHC will complain that the
@@ -1288,7 +1357,7 @@ haddock-html: /usr/share/doc/ghc/html/libraries/unix
idpackage specification
- The package ID. It is up to you to choose a suitable
+ The installed package ID. It is up to you to choose a suitable
one.
@@ -1445,6 +1514,25 @@ haddock-html: /usr/share/doc/ghc/html/libraries/unix
+
+
+ reexported-modules
+ reexported-modulesreexport specification
+
+
+ Modules reexported by this package. This list takes
+ the form of pkg:OldName as NewName
+ (A@orig-pkg-0.1-HASH): the first portion of the
+ string is the user-written reexport specification (possibly
+ omitting the package qualifier and the renaming), while the
+ parenthetical is the original package which exposed the
+ module under a particular name. Reexported modules have
+ a relaxed overlap constraint: it's permissible for two
+ packages to reexport the same module as the same name if the
+ reexported module is identical.
+
+
+
trusted
diff --git a/docs/users_guide/phases.xml b/docs/users_guide/phases.xml
index acb53a73d907..8a5589acdab1 100644
--- a/docs/users_guide/phases.xml
+++ b/docs/users_guide/phases.xml
@@ -216,15 +216,6 @@
Pass option to the LLVM compiler.
-
-
- option
-
-
-
- Pass option to the mangler.
-
- option
@@ -585,8 +576,22 @@ $ cat foo.hspp
Omit code generation (and all later phases)
- altogether. Might be of some use if you just want to see
- dumps of the intermediate compilation phases.
+ altogether. This is useful if you're only interested in
+ type checking code.
+
+
+
+
+
+
+
+
+
+ Always write interface files. GHC will normally write
+ interface files automatically, but this flag is useful with
+ , which normally suppresses generation
+ of interface files. This is useful if you want to type check
+ over multiple runs of GHC without compiling dependencies.
diff --git a/docs/users_guide/safe_haskell.xml b/docs/users_guide/safe_haskell.xml
index 04a244b3aff7..10d0a638f0df 100644
--- a/docs/users_guide/safe_haskell.xml
+++ b/docs/users_guide/safe_haskell.xml
@@ -186,18 +186,18 @@
- The use of the flag to compile the Danger module
- restricts the features of Haskell that can be used to a
- safe subset. This includes
- disallowing unsafePerformIO, Template Haskell, pure
- FFI functions, Generalized Newtype Deriving, RULES and restricting the
- operation of Overlapping Instances. The flag also
- restricts the modules can be imported by Danger to only those that are
- considered trusted. Trusted modules are those compiled with
- , where GHC provides a mechanical guarantee that
- the code is safe. Or those modules compiled with
- , where the module author claims that the
- module is Safe.
+ The use of the flag to compile the
+ Danger module restricts the features of Haskell that can be used
+ to a safe subset. This
+ includes disallowing unsafePerformIO,
+ Template Haskell, pure FFI functions, RULES and restricting the
+ operation of Overlapping Instances. The
+ flag also restricts the modules that can be imported by Danger to
+ only those that are considered trusted. Trusted modules are
+ those compiled with , where GHC provides
+ a mechanical guarantee that the code is safe. Or those modules
+ compiled with , where the module
+ author claims that the module is Safe.
@@ -254,6 +254,7 @@
for example, that the unsafePerformIO :: IO a -> a
function is disallowed in the safe language.
+
Module boundary control — Haskell
code compiled using the safe language is guaranteed to only access
symbols that are publicly available to it through other modules export
@@ -263,10 +264,8 @@
through careful use of its export list then code compiled using the
safe language that imports M is guaranteed to respect those invariants.
Because of this, Template
- Haskell and
- GeneralizedNewtypeDeriving
- are disabled in the safe language as they can be used
- to violate this property.
+ Haskell is disabled in the safe language as it can be
+ used to violate this property.
Semantic consistency — The safe
language is strictly a subset of Haskell as implemented by GHC. Any
@@ -294,10 +293,6 @@
following features:
- GeneralizedNewtypeDeriving — It can
- be used to violate constructor access control, by allowing untrusted
- code to manipulate protected data types in ways the data type author
- did not intend, breaking invariants they have established.TemplateHaskell — Is particularly
dangerous, as it can cause side effects even at compilation time and
can be used to access constructors of abstract data types.
diff --git a/docs/users_guide/ug-book.xml.in b/docs/users_guide/ug-book.xml.in
index dc5d4f7c35b7..b87563ac3b01 100644
--- a/docs/users_guide/ug-book.xml.in
+++ b/docs/users_guide/ug-book.xml.in
@@ -17,7 +17,6 @@
&lang-features;
&ffi-chap;
&extending-ghc;
-&external-core;
&wrong;
&utils;
&win32-dll;
diff --git a/docs/users_guide/ug-ent.xml.in b/docs/users_guide/ug-ent.xml.in
index ce87089f2446..6753ff7e5b69 100644
--- a/docs/users_guide/ug-ent.xml.in
+++ b/docs/users_guide/ug-ent.xml.in
@@ -3,7 +3,7 @@
-
+
@@ -12,7 +12,6 @@
-
diff --git a/docs/users_guide/using.xml b/docs/users_guide/using.xml
index 8d8211eb5aad..921d5a3345c3 100644
--- a/docs/users_guide/using.xml
+++ b/docs/users_guide/using.xml
@@ -899,20 +899,37 @@ ghci> :set -fprint-explicit-foralls
ghci> :t f
f :: forall a. a -> a
- Using makes GHC print kind-foralls and kind applications
+However, regardless of the flag setting, the quantifiers are printed under these circumstances:
+
+For nested foralls, e.g.
+
+ghci> :t GHC.ST.runST
+GHC.ST.runST :: (forall s. GHC.ST.ST s a) -> a
+
+
+If any of the quantified type variables has a kind
+that mentions a kind variable, e.g.
+
+ghci> :i Data.Coerce.coerce
+coerce ::
+ forall (k :: BOX) (a :: k) (b :: k). Coercible a b => a -> b
+ -- Defined in GHC.Prim
+
+
+
+
+
+ Using makes GHC print kind arguments
in types, which are normally suppressed. This can be important when you are using kind polymorphism.
For example:
ghci> :set -XPolyKinds
ghci> data T a = MkT
ghci> :t MkT
-MkT :: T b
+MkT :: forall (k :: BOX) (a :: k). T a
ghci> :set -fprint-explicit-foralls
ghci> :t MkT
-MkT :: forall (b::k). T b
-ghci> :set -fprint-explicit-kinds
-ghci> :t MkT
-MkT :: forall (k::BOX) (b:k). T b
+MkT :: forall (k :: BOX) (a :: k). T k a
@@ -1719,15 +1736,50 @@ f "2" = 2
unused binds, warningbinds, unusedReport any function definitions (and local bindings)
- which are unused. For top-level functions, the warning is
- only given if the binding is not exported.
- A definition is regarded as "used" if (a) it is exported, or (b) it is
- mentioned in the right hand side of another definition that is used, or (c) the
- function it defines begins with an underscore. The last case provides a
- way to suppress unused-binding warnings selectively.
- Notice that a variable
- is reported as unused even if it appears in the right-hand side of another
- unused binding.
+ which are unused. More precisely:
+
+
+ Warn if a binding brings into scope a variable that is not used,
+ except if the variable's name starts with an underscore. The "starts-with-underscore"
+ condition provides a way to selectively disable the warning.
+
+
+ A variable is regarded as "used" if
+
+ It is exported, or
+ It appears in the right hand side of a binding that binds at
+ least one variable that is used
+
+ For example
+
+module A (f) where
+f = let (p,q) = rhs1 in t p -- Warning about unused q
+t = rhs3 -- No warning: f is used, and hence so is t
+g = h x -- Warning: g unused
+h = rhs2 -- Warning: h is only used in the right-hand side of another unused binding
+_w = True -- No warning: _w starts with an underscore
+
+
+
+
+ Warn if a pattern binding binds no variables at all, unless it is a lone, possibly-banged, wild-card pattern.
+ For example:
+
+Just _ = rhs3 -- Warning: unused pattern binding
+(_, _) = rhs4 -- Warning: unused pattern binding
+_ = rhs3 -- No warning: lone wild-card pattern
+!_ = rhs4 -- No warning: banged wild-card pattern; behaves like seq
+
+ The motivation for allowing lone wild-card patterns is that
+ they are not very different from _v = rhs3,
+ which elicits no warning; and they can be useful to add a type
+ constraint, e.g. _ = x::Int. A lone
+ banged wild-card pattern is useful as an alternative
+ (to seq) way to force evaluation.
+
+
+
+
@@ -1814,6 +1866,16 @@ f "2" = 2
+
+ :
+
+
+ Warn if a rewrite RULE might fail to fire because the function might be
+ inlined before the rule has a chance to fire. See .
+
+
+
+
If you're feeling really paranoid, the
@@ -2967,44 +3029,6 @@ data D = D !C
&runtime;
-
-
- Generating and compiling External Core Files
-
- intermediate code generation
-
- GHC can dump its optimized intermediate code (said to be in “Core” format)
- to a file as a side-effect of compilation. Non-GHC back-end tools can read and process Core files; these files have the suffix
- .hcr. The Core format is described in
- An External Representation for the GHC Core Language,
- and sample tools
- for manipulating Core files (in Haskell) are available in the
- extcore package on Hackage. Note that the format of .hcr
- files is different from the Core output format that GHC generates
- for debugging purposes (), though the two formats appear somewhat similar.
-
- The Core format natively supports notes which you can add to
- your source code using the CORE pragma (see ).
-
-
-
-
-
-
-
-
-
- Generate .hcr files.
-
-
-
-
-
-Currently (as of version 6.8.2), GHC does not have the ability to read in External Core files as source. If you would like GHC to have this ability, please make your wishes known to the GHC Team.
-
-
-
&debug;
&flags;
diff --git a/docs/vh/Makefile b/docs/vh/Makefile
deleted file mode 100644
index 4410e4953dc9..000000000000
--- a/docs/vh/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-TOP = ../..
-include $(TOP)/mk/boilerplate.mk
-
-XML_DOC = vh
-INSTALL_XML_DOC = vh
-
-include $(TOP)/mk/target.mk
diff --git a/docs/vh/vh.xml b/docs/vh/vh.xml
deleted file mode 100644
index 5c25e3109c6a..000000000000
--- a/docs/vh/vh.xml
+++ /dev/null
@@ -1,312 +0,0 @@
-
-
-
-
-
-
- Visual Haskell User's Guide
-
- Simon
- Marlow
- simonmar@microsoft.com
-
-
- Krasimir
- Angelov
- kr.angelov@gmail.com
-
-
-
-
-
- Introduction
-
- Visual Haskell is a plugin for Microsoft's Visual Studio
- development environment to support development of Haskell software.
- Like the other Visual languages, Visual Haskell integrates with the
- Visual Studio editor to provide interactive features to aid Haskell
- development, and it enables the construction of projects consisting of
- multiple Haskell modules.
-
-
- Installing Visual Haskell
-
- In order to use Visual Haskell, you need Visual Studio .NET
- 2003. Right now, this is the only supported version of Visual
- Studio - unfortunately we haven't yet added support for the 2005
- Beta. The Express languages (Visual C++ Express etc.) also will not
- work, because they don't have support for plugins.
-
- You don't need to install GHC separately: Visual Haskell
- is bundled with a complete GHC distribution, and various other tools
- (Happy, Alex, Haddock).
-
- The latest Visual Haskell installer can be obtained from
- here:
-
- http://www.haskell.org/visualhaskell/
-
-
-
- Release Notes
-
-
- Version 0.0, first release
-
- This release is a technology preview, and should be considered
- alpha quality. It works for us, but you are fairly likely to
- encounter problems. If you're willing to try it out and report
- bugs, we'd be grateful for the feedback.
-
-
-
- This release of Visual Haskell is bundled with a
- development snapshot of GHC, version 6.5 from around 14
- September 2005. This version of GHC is used to provide the
- interactive editing features, and will be used to compile all
- code inside Visual Haskell. It is possible that in future
- releases we may be able to relax this tight coupling between
- Visual Haskell and the bundled GHC.
-
- Please note that future releases of Visual
- Haskell will update the compiler, and hence the
- packages, and so may break your code. Also note that because
- the bundled GHC is not a released version, it may have bugs and
- quirks itself: please report them as usual to
- glasgow-haskell-bugs@haskell.org.
-
-
-
- We're not making source code for the plugin generally
- available at this time, due to licensing restrictions on the
- Visual Studio APIs that the plugin uses (for more
- information see Visual Studio
- Extensibility Center). If you're interested in
- contributing to Visual Haskell, please get in touch with the
- authors.
-
-
-
-
-
-
- Getting support, reporting bugs
- Please report bugs to
- glasgow-haskell-bugs@haskell.org (subscribe here), clearly indicating
- that your bug report relates to Visual Haskell, and giving as much
- information as possible so that we can reproduce the bug. Even if
- you can't reproduce the bug reliably, it is still useful to report
- what you've seen.
-
- For help and support, use the
- glasgow-haskell-users@haskell.org (subscribe here) mailing list.
-
-
-
- License
-
-