SeaLang/src/mir.rs
//! Machine-level Intermediate Representation
use std::cmp::Ordering;
use std::collections::btree_map::Entry;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::fmt::Display;
use std::hash::{Hash, Hasher};
use std::u32;
use itertools::Itertools;
use liveness::LivenessBuilder;
use crate::asm::amd64::Register;
use crate::ast::IntegralType;
use crate::string_table::{Index as StringsIndex, StringTable};
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum Type {
/// 8-bit integer
Byte,
/// 16-bit integer
Word,
/// 32-bit integer
DWord,
/// 64-bit integer
QWord,
/// 32-bit IEEE-754 float
SinglePrecision,
/// 64-bit IEEE-754 float
DoublePrecision,
// XWord,
// YWord,
// ZWord,
}
impl core::fmt::Display for Type {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let name = match self {
Type::Byte => "byte",
Type::Word => "word",
Type::DWord => "dword",
Type::QWord => "qword",
Type::SinglePrecision => "f32",
Type::DoublePrecision => "f64",
};
write!(f, "{name}")
}
}
impl Type {
pub fn int_repr(self) -> Self {
match self {
Type::SinglePrecision => Self::DWord,
Type::DoublePrecision => Self::QWord,
_ => self,
}
}
pub fn register_width(&self, reg: Register) -> Register {
match self {
Type::Byte => reg.into_byte(),
Type::Word => reg.into_word(),
Type::DWord => reg.into_dword(),
Type::QWord => reg.into_qword(),
Type::SinglePrecision | Type::DoublePrecision => reg,
}
}
pub const fn is_floating(self) -> bool {
match self {
Type::SinglePrecision | Type::DoublePrecision => true,
_ => false,
}
}
pub const fn from_bitsize_int(size: u32) -> Type {
match size {
..=8 => Self::Byte,
9..=16 => Self::Word,
17..=32 => Self::DWord,
33..=64 => Self::QWord,
_ => unimplemented!(),
}
}
pub const fn from_bytesize_int(size: u32) -> Type {
Self::from_bitsize_int(size * 8)
}
pub const fn bits(self) -> u32 {
match self {
Type::Byte => 8,
Type::Word => 16,
Type::DWord => 32,
Type::QWord => 64,
Type::SinglePrecision => 32,
Type::DoublePrecision => 64,
}
}
pub const fn constant_inst(self) -> Inst {
match self {
Type::Byte => Inst::ConstantByte,
Type::Word => Inst::ConstantWord,
Type::SinglePrecision | Type::DWord => Inst::ConstantDWord,
Type::DoublePrecision | Type::QWord => Inst::ConstantQWord,
}
}
pub const fn bytes(self) -> u32 {
match self {
Type::Byte => 1,
Type::Word => 2,
Type::DWord => 4,
Type::QWord => 8,
Type::SinglePrecision => 4,
Type::DoublePrecision => 8,
}
}
}
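// Illustrative sanity checks for the width helpers above; a minimal sketch
// that exercises only the conversions defined in this file.
#[cfg(test)]
mod type_width_checks {
    use super::*;

    #[test]
    fn bit_and_byte_sizes_round_trip() {
        // every width from 1 to 64 bits maps to a machine type at least that wide
        for bits in 1..=64u32 {
            let ty = Type::from_bitsize_int(bits);
            assert!(ty.bits() >= bits);
            assert_eq!(ty.bytes() * 8, ty.bits());
        }
        // byte sizes go through the same table: 3 bytes still needs a dword
        assert_eq!(Type::from_bytesize_int(3), Type::DWord);
    }

    #[test]
    fn floats_reuse_integer_widths() {
        // `int_repr` picks the integer type with the same bit width
        assert_eq!(Type::SinglePrecision.int_repr(), Type::DWord);
        assert_eq!(Type::DoublePrecision.int_repr(), Type::QWord);
        assert!(Type::DoublePrecision.is_floating() && !Type::QWord.is_floating());
    }
}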
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum Inst {
/// index
Label,
// Constant(Type),
/// index
ConstantBytes,
/// imm8
ConstantByte,
/// imm16
ConstantWord,
/// imm32
ConstantDWord,
/// imm64
ConstantQWord,
/// imm32fp
ConstantSinglePrecision,
/// imm64fp
ConstantDoublePrecision,
/// src
LoadRegister(Type), // hint for loading value into register
/// ast-node
ExternRef,
/// size, align
Alloca,
/// src
Load(Type),
/// src, dst
Store(Type),
/// src, idx,
GetElementPtr(Type),
Parameter(Type),
/// lhs, rhs
Add(Type),
/// lhs, rhs
Sub(Type),
/// lhs, rhs
Mul(Type),
/// lhs, rhs
MulSigned(Type),
/// lhs, rhs
Div(Type),
/// lhs, rhs
MulSSE(Type),
/// lhs, rhs
DivSSE(Type),
/// lhs, rhs
RemFP(Type),
/// lhs, rhs
DivSigned(Type),
/// lhs, rhs
Rem(Type),
/// lhs, rhs
RemSigned(Type),
/// lhs, rhs
BitAnd(Type),
/// lhs, rhs
BitOr(Type),
/// lhs, rhs
BitXOr(Type),
/// lhs
Negate(Type),
/// lhs
Not(Type),
/// lhs, rhs
ShiftLeft(Type),
/// lhs, rhs
ShiftRightSigned(Type),
/// lhs, rhs
ShiftRightUnsigned(Type),
/// lhs
ReturnValue(Type),
Return,
/// lhs
SignExtend(Type),
/// lhs
ZeroExtend(Type),
// type specifies input type
/// lhs
IsZero(Type),
//FunctionStart,
/// lhs, rhs
Cmp(Type),
/// lhs
IsEq(bool),
/// lhs
IsNeq(bool),
/// lhs
IsGt(bool),
/// lhs
IsLt(bool),
/// lhs
IsGe(bool),
/// lhs
IsLe(bool),
// jrcxz for now
/// lhs, rhs
Branch(u32),
/// lhs
Jump,
/// lhs, rhs
Phi2(Type),
}
impl Inst {
fn value_type(&self) -> Option<Type> {
match self {
Inst::Label
| Inst::ConstantBytes
| Inst::ExternRef
| Inst::Alloca
| Inst::ReturnValue(_)
| Inst::Store(_)
| Inst::ConstantByte
| Inst::ConstantWord
| Inst::ConstantDWord
| Inst::ConstantQWord
| Inst::ConstantSinglePrecision
| Inst::ConstantDoublePrecision
| Inst::Cmp(_)
| Inst::Branch(_)
| Inst::Jump
| Inst::Return => None,
Inst::Phi2(ty)
| Inst::GetElementPtr(ty)
| Inst::Load(ty)
| Inst::LoadRegister(ty)
| Inst::Parameter(ty)
| Inst::Add(ty)
| Inst::Sub(ty)
| Inst::Mul(ty)
| Inst::MulSigned(ty)
| Inst::Div(ty)
| Inst::DivSigned(ty)
| Inst::Rem(ty)
| Inst::RemSigned(ty)
| Inst::MulSSE(ty)
| Inst::DivSSE(ty)
| Inst::RemFP(ty)
| Inst::BitAnd(ty)
| Inst::BitOr(ty)
| Inst::BitXOr(ty)
| Inst::Negate(ty)
| Inst::Not(ty)
| Inst::SignExtend(ty)
| Inst::ZeroExtend(ty)
| Inst::ShiftLeft(ty)
| Inst::ShiftRightSigned(ty)
| Inst::ShiftRightUnsigned(ty) => Some(*ty),
Inst::IsZero(_)
| Inst::IsEq(_)
| Inst::IsNeq(_)
| Inst::IsGt(_)
| Inst::IsLt(_)
| Inst::IsGe(_)
| Inst::IsLe(_) => Some(Type::Byte),
}
}
#[allow(dead_code)]
fn result_is_register(&self) -> bool {
// when an arithmetic instruction ends up with two immediate operands, it can
// simply be replaced with a mov into the destination register.
// TODO: need to account for spilled values eventually; probably move this to `Mir`.
match self {
Inst::Label
| Inst::Branch(_)
| Inst::Jump
| Inst::ConstantBytes
| Inst::ConstantByte
| Inst::ConstantWord
| Inst::ConstantDWord
| Inst::ConstantQWord
| Inst::ConstantSinglePrecision
| Inst::ConstantDoublePrecision
| Inst::ExternRef
| Inst::Alloca
| Inst::Store(_)
| Inst::ReturnValue(_)
| Inst::Cmp(_)
| Inst::Return => false,
Inst::GetElementPtr(_)
| Inst::Load(_)
| Inst::LoadRegister(_)
| Inst::Parameter(_)
| Inst::Add(_)
| Inst::Sub(_)
| Inst::Mul(_)
| Inst::MulSigned(_)
| Inst::Div(_)
| Inst::DivSigned(_)
| Inst::Rem(_)
| Inst::RemSigned(_)
| Inst::MulSSE(_)
| Inst::DivSSE(_)
| Inst::RemFP(_)
| Inst::BitAnd(_)
| Inst::BitOr(_)
| Inst::BitXOr(_)
| Inst::Negate(_)
| Inst::Not(_)
| Inst::SignExtend(_)
| Inst::ZeroExtend(_)
| Inst::IsZero(_)
| Inst::IsEq(_)
| Inst::IsNeq(_)
| Inst::IsGt(_)
| Inst::IsLt(_)
| Inst::IsGe(_)
| Inst::IsLe(_)
| Inst::Phi2(_)
| Inst::ShiftLeft(_)
| Inst::ShiftRightSigned(_)
| Inst::ShiftRightUnsigned(_) => true,
}
}
}
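// A small illustration of the two classifiers above: `value_type` reports the
// type a node produces (comparisons always yield a byte), while
// `result_is_register` marks nodes whose result lives in a register.
#[cfg(test)]
mod inst_classification_checks {
    use super::*;

    #[test]
    fn value_types_and_register_results() {
        assert_eq!(Inst::Add(Type::DWord).value_type(), Some(Type::DWord));
        assert_eq!(Inst::IsEq(true).value_type(), Some(Type::Byte));
        assert_eq!(Inst::Store(Type::QWord).value_type(), None);
        // stores and plain constants never occupy a register ...
        assert!(!Inst::Store(Type::QWord).result_is_register());
        assert!(!Inst::ConstantDWord.result_is_register());
        // ... while arithmetic results do
        assert!(Inst::Mul(Type::Word).result_is_register());
    }
}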
#[derive(Clone, Copy)]
pub union Data {
none: (),
imm8: u8,
imm16: u16,
imm32: u32,
imm64: u64,
index: StringsIndex,
node: u32,
binary: (u32, u32),
}
impl From<u8> for Data {
fn from(value: u8) -> Self {
Data::imm8(value)
}
}
impl From<u16> for Data {
fn from(value: u16) -> Self {
Data::imm16(value)
}
}
impl From<u32> for Data {
fn from(value: u32) -> Self {
Data::imm32(value)
}
}
impl From<u64> for Data {
fn from(value: u64) -> Self {
Data::imm64(value)
}
}
impl Data {
pub fn imm8(v: u8) -> Data {
Self { imm8: v }
}
pub fn imm16(v: u16) -> Data {
Self { imm16: v }
}
pub fn imm32(v: u32) -> Data {
Self { imm32: v }
}
pub fn imm64(v: u64) -> Data {
Self { imm64: v }
}
pub fn index(v: StringsIndex) -> Data {
Self { index: v }
}
pub fn node(v: u32) -> Data {
Self { node: v }
}
pub fn binary(lhs: u32, rhs: u32) -> Data {
Self { binary: (lhs, rhs) }
}
pub fn binary_noderefs(lhs: NodeRef, rhs: NodeRef) -> Data {
Self {
binary: (lhs.0, rhs.0),
}
}
pub fn none() -> Data {
Self { none: () }
}
pub fn as_imm8(self) -> u8 {
unsafe { self.imm8 }
}
pub fn as_imm16(self) -> u16 {
unsafe { self.imm16 }
}
pub fn as_imm32(self) -> u32 {
unsafe { self.imm32 }
}
pub fn as_imm64(self) -> u64 {
unsafe { self.imm64 }
}
pub fn as_index(self) -> StringsIndex {
unsafe { self.index }
}
pub fn as_node(self) -> u32 {
unsafe { self.node }
}
pub fn as_noderef(self) -> NodeRef {
NodeRef(self.as_node())
}
pub fn as_binary_noderefs(self) -> (NodeRef, NodeRef) {
let (a, b) = self.as_binary();
(a.into(), b.into())
}
pub fn as_binary(self) -> (u32, u32) {
unsafe { self.binary }
}
}
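// `Data` is an untagged union, so the caller must read a payload back with the
// accessor matching the `Inst` that owns it. A minimal round-trip sketch of
// that contract, using only the constructors and accessors defined above.
#[cfg(test)]
mod data_round_trip_checks {
    use super::*;

    #[test]
    fn accessors_match_constructors() {
        assert_eq!(Data::imm8(0xab).as_imm8(), 0xab);
        assert_eq!(Data::imm32(0xdead_beef).as_imm32(), 0xdead_beef);
        assert_eq!(Data::binary(3, 7).as_binary(), (3, 7));
        let pair = Data::binary_noderefs(NodeRef(1), NodeRef(2)).as_binary_noderefs();
        assert_eq!(pair, (NodeRef(1), NodeRef(2)));
    }
}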
use bitflags::bitflags;
bitflags! {
#[derive(Debug, Clone, Copy)]
struct BinaryOperandFlags: u8 {
const RhsReg = 0b00000001;
const RhsMem = 0b00000010;
const RhsImm = 0b00000100;
const LhsReg = 0b10000000;
const LhsMem = 0b01000000;
const LhsImm = 0b00100000;
const RegReg = 0b10000001;
const RegMem = 0b10000010;
const RegImm = 0b10000100;
const MemReg = 0b01000001;
const MemMem = 0b01000010;
const MemImm = 0b01000100;
const RhsAll = 0b00000111;
const LhsAll = 0b11100000;
const NULL = 0b0;
}
}
bitflags! {
#[derive(Debug, Clone, Copy)]
struct OperandKinds: u8 {
const RegReg = 0b00000001;
const RegMem = 0b00000010;
const RegImm = 0b00000100;
const MemReg = 0b00001000;
const MemMem = 0b00010000;
const MemImm = 0b00100000;
}
}
impl OperandKinds {
/// works for: add,sub,or,and,sbb,adc,cmp
fn add() -> Self {
Self::RegImm | Self::MemImm | Self::RegMem | Self::MemReg | Self::RegReg
}
/// imul is special...
fn imul() -> Self {
Self::RegImm | Self::MemImm | Self::RegMem | Self::RegReg
}
fn any_mem_reg() -> Self {
Self::MemMem | Self::MemReg | Self::RegMem | Self::RegReg
}
/// works for: div,idiv,mul
fn mul() -> Self {
Self::RegMem | Self::RegReg
}
/// works for: mulss,mulsd,divss,divsd,addss,addsd,subss,subsd
fn sse() -> Self {
Self::RegMem | Self::RegReg
}
/// works for: shl,shr,sar,sal,test
fn shift() -> Self {
Self::RegReg | Self::RegImm // | Self::MemImm | Self::MemReg
}
const fn to_rhs_binop(self) -> BinaryOperandFlags {
let reg = if self.intersects(Self::RegReg.union(Self::MemReg)) {
BinaryOperandFlags::RhsReg
} else {
BinaryOperandFlags::empty()
};
let mem = if self.intersects(Self::RegMem.union(Self::MemMem)) {
BinaryOperandFlags::RhsMem
} else {
BinaryOperandFlags::empty()
};
let imm = if self.intersects(Self::RegImm.union(Self::MemImm)) {
BinaryOperandFlags::RhsImm
} else {
BinaryOperandFlags::empty()
};
reg.union(mem).union(imm)
}
const fn to_lhs_binop(self) -> BinaryOperandFlags {
let reg = if self.intersects(Self::RegReg.union(Self::RegImm).union(Self::RegMem)) {
BinaryOperandFlags::LhsReg
} else {
BinaryOperandFlags::empty()
};
let mem = if self.intersects(Self::MemReg.union(Self::MemMem).union(Self::MemImm)) {
BinaryOperandFlags::LhsMem
} else {
BinaryOperandFlags::empty()
};
reg.union(mem)
}
fn reduce_with_rhs(self, rhs: BinaryOperandFlags) -> OperandKinds {
let mut out = self;
if !rhs.contains(BinaryOperandFlags::RhsImm) {
out = out.difference(Self::MemImm | Self::RegImm);
}
if !rhs.contains(BinaryOperandFlags::RhsMem) {
out = out.difference(Self::MemMem | Self::RegMem);
}
if !rhs.contains(BinaryOperandFlags::RhsReg) {
out = out.difference(Self::RegReg | Self::MemReg);
}
out
}
#[allow(dead_code)]
fn reduce_with_lhs(self, lhs: BinaryOperandFlags) -> OperandKinds {
let mut out = self;
if !lhs.contains(BinaryOperandFlags::LhsMem) {
out = out.difference(Self::MemReg | Self::MemMem | Self::MemImm);
}
if !lhs.contains(BinaryOperandFlags::LhsReg) {
out = out.difference(Self::RegReg | Self::RegMem | Self::RegImm);
}
out
}
}
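// How the legalization tables above fit together: an instruction's
// `OperandKinds` is projected into per-side `BinaryOperandFlags`, and once one
// side's kind is fixed the table is narrowed again with `reduce_with_rhs`.
// Illustrative checks only, covering the `add` table.
#[cfg(test)]
mod operand_kind_checks {
    use super::*;

    #[test]
    fn add_allows_imm_rhs_but_not_imm_lhs() {
        let add = OperandKinds::add();
        assert!(add.to_rhs_binop().contains(BinaryOperandFlags::RhsImm));
        assert!(!add.to_lhs_binop().contains(BinaryOperandFlags::LhsImm));
    }

    #[test]
    fn fixing_an_imm_rhs_narrows_the_table() {
        // once the rhs is known to be an immediate, only the *Imm forms survive
        let narrowed = OperandKinds::add().reduce_with_rhs(BinaryOperandFlags::RhsImm);
        assert_eq!(
            narrowed.bits(),
            (OperandKinds::RegImm | OperandKinds::MemImm).bits()
        );
    }
}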
enum OperandKind {
Mem,
Imm,
Reg,
}
impl OperandKind {
const fn as_rhs(self) -> BinaryOperandFlags {
match self {
OperandKind::Mem => BinaryOperandFlags::RhsMem,
OperandKind::Imm => BinaryOperandFlags::RhsImm,
OperandKind::Reg => BinaryOperandFlags::RhsReg,
}
}
const fn as_lhs(self) -> BinaryOperandFlags {
match self {
OperandKind::Mem => BinaryOperandFlags::LhsMem,
OperandKind::Imm => BinaryOperandFlags::LhsImm,
OperandKind::Reg => BinaryOperandFlags::LhsReg,
}
}
}
struct BinaryOperands<'a> {
mir: &'a mut Mir,
commutative: bool,
kinds: OperandKinds,
}
struct BinaryOperandsRunner<'a> {
inner: BinaryOperands<'a>,
lhs: (u32, Type),
rhs: (u32, Type),
}
impl<'a> BinaryOperandsRunner<'a> {
#[allow(dead_code)]
fn new(inner: BinaryOperands<'a>, lhs: u32, lhs_type: Type, rhs: u32, rhs_type: Type) -> Self {
Self {
inner,
lhs: (lhs, lhs_type),
rhs: (rhs, rhs_type),
}
}
fn mir_mut(&mut self) -> &mut Mir {
self.inner.mir
}
fn mir(&self) -> &Mir {
self.inner.mir
}
fn lhs(&self) -> u32 {
self.lhs.0
}
fn rhs(&self) -> u32 {
self.rhs.0
}
#[allow(dead_code)]
fn lhs_type(&self) -> Type {
self.lhs.1
}
fn rhs_type(&self) -> Type {
self.rhs.1
}
fn lhs_rhs(&self) -> (u32, u32) {
(self.lhs(), self.rhs())
}
fn canonicalise_lhs_with_reduced_kinds(&mut self, kinds: OperandKinds) {
let (lhs, ty) = self.lhs;
let l_legal = kinds.to_lhs_binop();
let l_kind = self.mir().as_operand_kind(self.lhs()).as_lhs();
if l_legal.contains(l_kind) {
} else if l_legal.contains(BinaryOperandFlags::LhsReg) {
self.lhs.0 = self.mir_mut().to_reg(ty, lhs);
} else if l_legal.contains(BinaryOperandFlags::LhsMem) {
self.lhs.0 = self.mir_mut().gen_spill_value(lhs);
} else {
unreachable!()
}
}
fn try_swap(&mut self) {
let (lhs, rhs) = self.lhs_rhs();
let l_legal = self.inner.kinds.to_lhs_binop();
let l_kind = self.mir().as_operand_kind(lhs).as_lhs();
// check the rhs operand against the lhs-position flags, since the question is
// whether it would be legal on the left after a swap
let r_kind_as_lhs = self.mir().as_operand_kind(rhs).as_lhs();
if self.inner.commutative && !l_legal.contains(l_kind) && l_legal.contains(r_kind_as_lhs) {
core::mem::swap(&mut self.lhs, &mut self.rhs);
}
}
fn order(&mut self) {
self.try_swap();
let rhs = self.rhs();
let ty = self.rhs_type();
let r_legal = self.inner.kinds.to_rhs_binop();
let r_kind = self.mir().as_operand_kind(rhs).as_rhs();
if r_legal.contains(r_kind) {
} else if r_legal.contains(BinaryOperandFlags::RhsReg) {
self.rhs.0 = self.mir_mut().to_reg(ty, rhs);
} else if r_legal.contains(BinaryOperandFlags::RhsMem) {
self.rhs.0 = self.mir_mut().gen_spill_value(rhs);
} else {
unreachable!()
}
let rhs = self.rhs();
self.canonicalise_lhs_with_reduced_kinds(
self.inner
.kinds
.reduce_with_rhs(self.mir().as_operand_kind(rhs).as_rhs()),
);
}
}
impl<'a> BinaryOperands<'a> {
fn new(mir: &'a mut Mir, commutative: bool, kinds: OperandKinds) -> Self {
Self {
mir,
commutative,
kinds,
}
}
fn new_add_or_and_xor_adc(mir: &'a mut Mir) -> Self {
Self::new(mir, true, OperandKinds::add())
}
fn new_sub_sbb_cmp(mir: &'a mut Mir) -> Self {
Self::new(mir, false, OperandKinds::add())
}
fn new_sse(mir: &'a mut Mir) -> Self {
Self::new(mir, true, OperandKinds::sse())
}
fn new_mul(mir: &'a mut Mir) -> Self {
Self::new(mir, true, OperandKinds::mul())
}
fn new_imul(mir: &'a mut Mir) -> Self {
Self::new(mir, true, OperandKinds::imul())
}
fn new_div_idiv_rem_irem(mir: &'a mut Mir) -> Self {
Self::new(mir, false, OperandKinds::mul())
}
fn new_shift(mir: &'a mut Mir) -> Self {
Self::new(mir, false, OperandKinds::shift())
}
fn wrangle(self, lhs: u32, lhs_type: Type, rhs: u32, rhs_type: Type) -> (u32, u32) {
let mut runner = BinaryOperandsRunner {
inner: self,
lhs: (lhs, lhs_type),
rhs: (rhs, rhs_type),
};
runner.order();
runner.lhs_rhs()
}
}
pub struct Mir {
name: StringsIndex,
pub nodes: Vec<Inst>,
pub data: Vec<Data>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct NodeRef(pub u32);
impl From<u32> for NodeRef {
fn from(value: u32) -> Self {
Self(value)
}
}
impl NodeRef {
fn index(self) -> usize {
self.0 as usize
}
fn into_reference_range(self) -> std::ops::Range<(NodeRef, NodeRef)> {
(self, Self::MIN)..(self, Self::MAX)
}
#[allow(dead_code)]
fn inclusive_start(self) -> (Self, Self) {
(self, Self::MIN)
}
fn exclusive_start(self) -> (Self, Self) {
(self, Self::MAX)
}
#[allow(dead_code)]
fn inclusive_end(self) -> (Self, Self) {
(self, Self::MAX)
}
#[allow(dead_code)]
fn exclusive_end(self) -> (Self, Self) {
(self, Self::MIN)
}
/// invalid pseudo-handle to the past-the-end node.
pub const MAX: Self = NodeRef(u32::MAX);
pub const MIN: Self = NodeRef(0);
}
impl Display for NodeRef {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "%{}", self.0)
}
}
impl Mir {
pub fn get_node(&self, node: NodeRef) -> (Inst, Data) {
(self.nodes[node.index()], self.data[node.index()])
}
#[allow(dead_code)]
pub fn get_node_mut(&mut self, node: NodeRef) -> (&mut Inst, &mut Data) {
(&mut self.nodes[node.index()], &mut self.data[node.index()])
}
pub fn set_node_data(&mut self, node: NodeRef, data: Data) {
self.data[node.index()] = data;
}
fn indices(&self) -> impl Iterator<Item = NodeRef> {
(0..self.nodes.len() as u32).map(|n| NodeRef::from(n))
}
}
impl Mir {
pub fn new(name: StringsIndex) -> Mir {
Self {
name,
nodes: Vec::new(),
data: Vec::new(),
}
}
fn as_operand_kind(&self, node: u32) -> OperandKind {
if self.is_imm(node) {
OperandKind::Imm
} else if self.is_register(node) {
OperandKind::Reg
} else {
OperandKind::Mem
}
}
pub fn to_reg(&mut self, ty: Type, node: u32) -> u32 {
if !self.is_register(node) {
self.gen_load_register(ty, node)
} else {
node
}
}
pub fn type_of_node(&self, node: u32) -> Option<Type> {
self.nodes[node as usize].value_type()
}
pub fn is_register(&self, node: u32) -> bool {
self.nodes[node as usize].result_is_register()
}
pub fn is_imm(&self, node: u32) -> bool {
match self.nodes[node as usize] {
Inst::ConstantByte
| Inst::ConstantWord
| Inst::ConstantDWord
| Inst::ConstantQWord
| Inst::ConstantSinglePrecision
| Inst::ConstantDoublePrecision => true,
_ => false,
}
}
pub fn push(&mut self, inst: Inst, data: Data) -> u32 {
let node = self.nodes.len() as u32;
self.nodes.push(inst);
self.data.push(data);
node
}
pub fn gen_u8(&mut self, value: u8) -> u32 {
self.push(Inst::ConstantByte, Data::imm8(value))
}
pub fn gen_u16(&mut self, value: u16) -> u32 {
self.push(Inst::ConstantWord, Data::imm16(value))
}
pub fn gen_u32(&mut self, value: u32) -> u32 {
self.push(Inst::ConstantDWord, Data::imm32(value))
}
pub fn gen_u64(&mut self, value: u64) -> u32 {
self.push(Inst::ConstantQWord, Data::imm64(value))
}
pub fn gen_f32(&mut self, bits: u32) -> u32 {
self.push(Inst::ConstantSinglePrecision, Data::imm32(bits))
}
pub fn gen_f64(&mut self, bits: u64) -> u32 {
self.push(Inst::ConstantDoublePrecision, Data::imm64(bits))
}
pub fn gen_zero_extend(&mut self, ty: Type, src: u32) -> u32 {
self.push(Inst::ZeroExtend(ty), Data::node(src))
}
pub fn gen_sign_extend(&mut self, ty: Type, src: u32) -> u32 {
self.push(Inst::SignExtend(ty), Data::node(src))
}
pub fn gen_is_zero(&mut self, ty: Type, src: u32) -> u32 {
self.push(Inst::IsZero(ty), Data::node(src))
}
pub fn gen_cmp(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = if ty.is_floating() {
BinaryOperands::new_sse(self).wrangle(lhs, ty, rhs, ty)
} else {
BinaryOperands::new_sub_sbb_cmp(self).wrangle(lhs, ty, rhs, ty)
};
self.push(Inst::Cmp(ty), Data::binary(lhs, rhs))
}
pub fn gen_jmp(&mut self, to: u32) -> u32 {
self.push(Inst::Jump, Data::node(to))
}
pub fn gen_branch(&mut self, on: u32, lhs: u32, rhs: u32) -> u32 {
self.push(Inst::Branch(on), Data::binary(lhs, rhs))
}
pub fn gen_phi2(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
self.push(Inst::Phi2(ty), Data::binary(lhs, rhs))
}
pub fn gen_cmp_byte(
&mut self,
ty: Type,
signed: bool,
ordering: Ordering,
invert: bool,
lhs: u32,
rhs: u32,
) -> u32 {
match ordering {
Ordering::Less => {
if invert {
self.gen_is_ge(ty, signed, lhs, rhs)
} else {
self.gen_is_lt(ty, signed, lhs, rhs)
}
}
Ordering::Equal => {
if invert {
self.gen_is_neq(ty, signed, lhs, rhs)
} else {
self.gen_is_eq(ty, signed, lhs, rhs)
}
}
Ordering::Greater => {
if invert {
self.gen_is_le(ty, signed, lhs, rhs)
} else {
self.gen_is_gt(ty, signed, lhs, rhs)
}
}
}
}
pub fn gen_is_eq(&mut self, ty: Type, signed: bool, lhs: u32, rhs: u32) -> u32 {
let cmp = self.gen_cmp(ty, lhs, rhs);
self.push(Inst::IsEq(signed), Data::node(cmp))
}
pub fn gen_is_neq(&mut self, ty: Type, signed: bool, lhs: u32, rhs: u32) -> u32 {
let cmp = self.gen_cmp(ty, lhs, rhs);
self.push(Inst::IsNeq(signed), Data::node(cmp))
}
pub fn gen_is_gt(&mut self, ty: Type, signed: bool, lhs: u32, rhs: u32) -> u32 {
let cmp = self.gen_cmp(ty, lhs, rhs);
self.push(Inst::IsGt(signed), Data::node(cmp))
}
pub fn gen_is_lt(&mut self, ty: Type, signed: bool, lhs: u32, rhs: u32) -> u32 {
let cmp = self.gen_cmp(ty, lhs, rhs);
self.push(Inst::IsLt(signed), Data::node(cmp))
}
pub fn gen_is_ge(&mut self, ty: Type, signed: bool, lhs: u32, rhs: u32) -> u32 {
let cmp = self.gen_cmp(ty, lhs, rhs);
self.push(Inst::IsGe(signed), Data::node(cmp))
}
pub fn gen_is_le(&mut self, ty: Type, signed: bool, lhs: u32, rhs: u32) -> u32 {
let cmp = self.gen_cmp(ty, lhs, rhs);
self.push(Inst::IsLe(signed), Data::node(cmp))
}
pub fn gen_load_register(&mut self, ty: Type, src: u32) -> u32 {
self.push(Inst::LoadRegister(ty), Data::node(src))
}
pub fn gen_spill_value(&mut self, src: u32) -> u32 {
let ty = self.type_of_node(src).unwrap();
let size = ty.bytes();
let alloc = self.gen_alloca(size, size);
_ = self.gen_store(ty, src, alloc);
alloc
}
pub fn gen_label(&mut self, name: StringsIndex) -> u32 {
self.push(Inst::Label, Data::index(name))
}
pub fn gen_alloca(&mut self, size: u32, align: u32) -> u32 {
self.push(Inst::Alloca, Data::binary(size, align))
}
pub fn gen_load(&mut self, ty: Type, src: u32) -> u32 {
self.push(Inst::Load(ty), Data::node(src))
}
pub fn gen_get_element_ptr(&mut self, ty: Type, src: u32, index: u32) -> u32 {
self.push(Inst::GetElementPtr(ty), Data::binary(src, index))
}
pub fn gen_store(&mut self, ty: Type, src: u32, dst: u32) -> u32 {
self.push(Inst::Store(ty), Data::binary(src, dst))
}
pub fn gen_param(&mut self, ty: Type) -> u32 {
self.push(Inst::Parameter(ty), Data::none())
}
/// truncates a value `src` of `mir::Type` `ty` to `bits` bits, while preserving the sign
pub fn gen_truncate_integer(&mut self, src: u32, ty: Type, signed: bool, bits: u16) -> u32 {
// example: u4 -> (byte, false, 4)
// 1111_1111 << 4 = 1111_0000
// mask = 0000_1111;
// shift = 8 - 4;
let shift = ty.bits() as u8 - bits as u8;
let mask = !(!0u64 << bits);
let mask = match ty {
Type::Byte => self.gen_u8(mask as u8),
Type::Word => self.gen_u16(mask as u16),
Type::SinglePrecision | Type::DWord => self.gen_u32(mask as u32),
Type::DoublePrecision | Type::QWord => self.gen_u64(mask as u64),
};
let masked = self.gen_bitand(ty, src, mask);
if signed {
// sign extend the high bits of the mir::Type
let shift = self.gen_u8(shift);
let tmp = self.gen_shl(ty, masked, shift);
let tmp = self.gen_sar(ty, tmp, shift);
tmp
} else {
masked
}
}
/// Extends a value `src` from integral type `from` to integral type `to`.
/// If both map to the same `mir::Type` this is effectively a no-op; the value
/// is sign-extended when `to` is signed, otherwise zero-extended (or the
/// register is zeroed).
pub fn gen_extend_integer(&mut self, src: u32, from: IntegralType, to: IntegralType) -> u32 {
let src_ty = Type::from_bitsize_int(from.bits as u32);
let truncated = self.gen_truncate_integer(src, src_ty, from.signed, from.bits);
let dst_ty = Type::from_bitsize_int(to.bits as u32);
if to.signed {
self.gen_sign_extend(dst_ty, truncated)
} else {
self.gen_zero_extend(dst_ty, truncated)
}
}
fn imm_to_reg(&mut self, src: u32) -> u32 {
if self.is_imm(src) {
// SAFETY: imms have values and thus types
self.gen_load_register(self.type_of_node(src).unwrap(), src)
} else {
src
}
}
pub fn gen_add(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = if ty.is_floating() {
BinaryOperands::new_sse(self).wrangle(lhs, ty, rhs, ty)
} else {
BinaryOperands::new_add_or_and_xor_adc(self).wrangle(lhs, ty, rhs, ty)
};
self.push(Inst::Add(ty), Data::binary(lhs, rhs))
}
pub fn gen_sub(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = if ty.is_floating() {
BinaryOperands::new_sse(self).wrangle(lhs, ty, rhs, ty)
} else {
BinaryOperands::new_sub_sbb_cmp(self).wrangle(lhs, ty, rhs, ty)
};
self.push(Inst::Sub(ty), Data::binary(lhs, rhs))
}
pub fn gen_mul(&mut self, ty: Type, signed: bool, lhs: u32, rhs: u32) -> u32 {
if ty.is_floating() {
self.gen_mul_sse(ty, lhs, rhs)
} else if signed {
self.gen_mul_signed(ty, lhs, rhs)
} else {
self.gen_mul_unsigned(ty, lhs, rhs)
}
}
pub fn gen_mul_sse(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_sse(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::MulSSE(ty), Data::binary(lhs, rhs))
}
pub fn gen_mul_unsigned(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_mul(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::Mul(ty), Data::binary(lhs, rhs))
}
pub fn gen_mul_signed(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_imul(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::MulSigned(ty), Data::binary(lhs, rhs))
}
pub fn gen_div(&mut self, ty: Type, signed: bool, lhs: u32, rhs: u32) -> u32 {
if ty.is_floating() {
self.gen_div_sse(ty, lhs, rhs)
} else if signed {
self.gen_div_signed(ty, lhs, rhs)
} else {
self.gen_div_unsigned(ty, lhs, rhs)
}
}
pub fn gen_div_sse(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_sse(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::DivSSE(ty), Data::binary(lhs, rhs))
}
pub fn gen_div_unsigned(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_div_idiv_rem_irem(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::Div(ty), Data::binary(lhs, rhs))
}
pub fn gen_div_signed(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_div_idiv_rem_irem(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::DivSigned(ty), Data::binary(lhs, rhs))
}
pub fn gen_rem(&mut self, ty: Type, signed: bool, lhs: u32, rhs: u32) -> u32 {
if ty.is_floating() {
self.gen_rem_fp(ty, lhs, rhs)
} else if signed {
self.gen_rem_signed(ty, lhs, rhs)
} else {
self.gen_rem_unsigned(ty, lhs, rhs)
}
}
pub fn gen_rem_fp(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) =
BinaryOperands::new(self, false, OperandKinds::any_mem_reg()).wrangle(lhs, ty, rhs, ty);
self.push(Inst::RemFP(ty), Data::binary(lhs, rhs))
}
pub fn gen_rem_unsigned(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_div_idiv_rem_irem(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::Rem(ty), Data::binary(lhs, rhs))
}
pub fn gen_rem_signed(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_div_idiv_rem_irem(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::RemSigned(ty), Data::binary(lhs, rhs))
}
pub fn gen_bitand(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_add_or_and_xor_adc(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::BitAnd(ty), Data::binary(lhs, rhs))
}
pub fn gen_bitor(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_add_or_and_xor_adc(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::BitOr(ty), Data::binary(lhs, rhs))
}
pub fn gen_bitxor(&mut self, ty: Type, lhs: u32, rhs: u32) -> u32 {
let (lhs, rhs) = BinaryOperands::new_add_or_and_xor_adc(self).wrangle(lhs, ty, rhs, ty);
self.push(Inst::BitXOr(ty), Data::binary(lhs, rhs))
}
pub fn gen_negate(&mut self, ty: Type, src: u32) -> u32 {
let src = self.imm_to_reg(src);
self.push(Inst::Negate(ty), Data::node(src))
}
pub fn gen_not(&mut self, ty: Type, src: u32) -> u32 {
let src = self.imm_to_reg(src);
self.push(Inst::Not(ty), Data::node(src))
}
#[doc(alias = "gen_shift_left")]
pub fn gen_shl(&mut self, ty: Type, src: u32, shift: u32) -> u32 {
let (src, shift) = BinaryOperands::new_shift(self).wrangle(src, ty, shift, Type::Byte);
self.push(Inst::ShiftLeft(ty), Data::binary(src, shift))
}
#[doc(alias = "gen_shift_right")]
pub fn gen_shr(&mut self, ty: Type, src: u32, shift: u32) -> u32 {
let (src, shift) = BinaryOperands::new_shift(self).wrangle(src, ty, shift, Type::Byte);
self.push(Inst::ShiftRightUnsigned(ty), Data::binary(src, shift))
}
#[doc(alias = "gen_shift_right_signed")]
pub fn gen_sar(&mut self, ty: Type, src: u32, shift: u32) -> u32 {
let (src, shift) = BinaryOperands::new_shift(self).wrangle(src, ty, shift, Type::Byte);
self.push(Inst::ShiftRightSigned(ty), Data::binary(src, shift))
}
pub fn gen_ret_val(&mut self, ty: Type, val: u32) -> u32 {
self.push(Inst::ReturnValue(ty), Data::node(val))
}
pub fn gen_ret(&mut self) -> u32 {
self.push(Inst::Return, Data::none())
}
}
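// Worked example of the mask-and-shift arithmetic used by
// `gen_truncate_integer` / `gen_extend_integer` above, checked with plain
// integer math and no Mir nodes involved; illustrative only.
#[cfg(test)]
mod truncate_math_checks {
    #[test]
    fn i4_truncation_masks_then_sign_extends() {
        // truncating to 4 bits inside a byte: mask = 0b0000_1111, shift = 8 - 4
        let bits = 4u32;
        let mask = !(!0u64 << bits) as u8;
        assert_eq!(mask, 0b0000_1111);
        let shift = 8u8 - bits as u8;
        // -3 as an i4 is 0b1101; shl then sar reproduces the sign in a full byte
        let masked = 0b1101u8 & mask;
        let sign_extended = ((masked << shift) as i8) >> shift;
        assert_eq!(sign_extended, -3);
        // unsigned truncation stops after the mask
        assert_eq!(masked, 13);
    }
}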
impl Mir {
fn render_node<W: core::fmt::Write>(
&self,
w: &mut W,
strings: &StringTable,
liveness: &Liveness,
node: u32,
) -> core::fmt::Result {
let idx = node as usize;
let inst = self.nodes[idx];
let data = self.data[idx];
if let Some(reg) = liveness.get_register(node.into()) {
write!(w, "({reg}) ")?;
}
match inst {
Inst::Label => writeln!(w, "%{node} = label {}", strings.get_str(data.as_index())),
Inst::ConstantBytes => writeln!(
w,
"%{node} = bytes({:x?})",
strings.get_bytes(data.as_index())
),
Inst::ConstantByte => writeln!(w, "%{node} = imm8({:x?})", data.as_imm8()),
Inst::ConstantWord => writeln!(w, "%{node} = imm16({:x?})", data.as_imm16()),
Inst::ConstantDWord => writeln!(w, "%{node} = imm32({:x?})", data.as_imm32()),
Inst::ConstantQWord => writeln!(w, "%{node} = imm64({:x?})", data.as_imm64()),
Inst::ConstantSinglePrecision => writeln!(
w,
"%{node} = imm32fp({:x?})",
f32::from_bits(data.as_imm32())
),
Inst::ConstantDoublePrecision => writeln!(
w,
"%{node} = imm64fp({:x?})",
f64::from_bits(data.as_imm64())
),
Inst::LoadRegister(ty) => {
let src = data.as_node();
writeln!(w, "%{node} = load register {ty} %{src}")
}
Inst::ExternRef => writeln!(w, "%{node} = extern %%{}", data.as_node()),
Inst::Alloca => {
let (size, align) = data.as_binary();
writeln!(w, "%{node} = alloca {size}, {align}")
}
Inst::GetElementPtr(ty) => {
let (ptr, idx) = data.as_binary();
writeln!(w, "%{node} = get element ptr {ty}, ptr %{ptr}, idx {idx}")
}
Inst::Load(ty) => {
writeln!(w, "%{node} = load {ty}, ptr %{}", data.as_node())
}
Inst::Store(ty) => {
let (src, dst) = data.as_binary();
writeln!(w, "%{node} = store {ty}, ptr %{dst}, {ty} %{src}")
}
Inst::Parameter(ty) => {
writeln!(w, "%{node} = param {ty}")
}
Inst::Add(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = add {ty} %{lhs}, {ty} %{rhs}")
}
Inst::Sub(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = sub {ty} %{lhs}, {ty} %{rhs}")
}
Inst::Mul(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = mul {ty} %{lhs}, {ty} %{rhs}")
}
Inst::MulSigned(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = signed mul {ty} %{lhs}, {ty} %{rhs}")
}
Inst::Div(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = div {ty} %{lhs}, {ty} %{rhs}")
}
Inst::DivSigned(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = signed div {ty} %{lhs}, {ty} %{rhs}")
}
Inst::Rem(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = rem {ty} %{lhs}, {ty} %{rhs}")
}
Inst::RemSigned(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = signed rem {ty} %{lhs}, {ty} %{rhs}")
}
Inst::MulSSE(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = mulss {ty} %{lhs}, {ty} %{rhs}")
}
Inst::DivSSE(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = divss {ty} %{lhs}, {ty} %{rhs}")
}
Inst::RemFP(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = fp rem {ty} %{lhs}, {ty} %{rhs}")
}
Inst::BitAnd(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = bitand {ty} %{lhs}, {ty} %{rhs}")
}
Inst::BitOr(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = bitor {ty} %{lhs}, {ty} %{rhs}")
}
Inst::BitXOr(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = bitxor {ty} %{lhs}, {ty} %{rhs}")
}
Inst::Negate(ty) => {
let lhs = data.as_node();
writeln!(w, "%{node} = negate {ty} %{lhs}")
}
Inst::Not(ty) => {
let lhs = data.as_node();
writeln!(w, "%{node} = bitwise not {ty} %{lhs}")
}
Inst::SignExtend(ty) => {
let lhs = data.as_node();
writeln!(w, "%{node} = sign extend {ty} %{lhs}")
}
Inst::ZeroExtend(ty) => {
let lhs = data.as_node();
writeln!(w, "%{node} = zero extend {ty} %{lhs}")
}
Inst::IsZero(ty) => {
let lhs = data.as_node();
writeln!(w, "%{node} = is zero {ty} %{lhs}")
}
Inst::Cmp(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = cmp {ty} %{lhs} {ty} %{rhs}")
}
Inst::IsEq(_) => {
let lhs = data.as_node();
writeln!(w, "%{node} = is eq %{lhs}")
}
Inst::IsNeq(_) => {
let lhs = data.as_node();
writeln!(w, "%{node} = is neq %{lhs}")
}
Inst::IsGt(_) => {
let lhs = data.as_node();
writeln!(w, "%{node} = is gt %{lhs}")
}
Inst::IsLt(_) => {
let lhs = data.as_node();
writeln!(w, "%{node} = is lt %{lhs}")
}
Inst::IsGe(_) => {
let lhs = data.as_node();
writeln!(w, "%{node} = is ge %{lhs}")
}
Inst::IsLe(_) => {
let lhs = data.as_node();
writeln!(w, "%{node} = is le %{lhs}")
}
Inst::ShiftLeft(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = shift left {ty} %{lhs}, {ty} %{rhs}")
}
Inst::ShiftRightUnsigned(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = shift right {ty} %{lhs}, {ty} %{rhs}")
}
Inst::ShiftRightSigned(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = signed shift right {ty} %{lhs}, {ty} %{rhs}")
}
Inst::ReturnValue(ty) => {
let lhs = data.as_node();
writeln!(w, "%{node} = return {ty} %{lhs}")
}
Inst::Return => {
writeln!(w, "%{node} = return")
}
Inst::Jump => {
let lhs = data.as_node();
writeln!(w, "%{node} = jmp %{lhs}")
}
Inst::Branch(condition) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = br bool %{condition}, [%{lhs}, %{rhs}]")
}
Inst::Phi2(ty) => {
let (lhs, rhs) = data.as_binary();
writeln!(w, "%{node} = phi2 [{ty} %{lhs}, {ty} %{rhs}]")
}
}
}
pub fn render<W: core::fmt::Write>(
&self,
w: &mut W,
strings: &StringTable,
) -> core::fmt::Result {
let reg_alloc = self.build_liveness();
for node in 0..self.nodes.len() {
self.render_node(w, strings, &reg_alloc, node as u32)?;
}
Ok(())
}
pub fn display<'a, 'b>(&'a self, strings: &'b StringTable) -> DisplayMir<'a, 'b> {
DisplayMir { mir: self, strings }
}
}
pub use liveness::Liveness;
pub mod liveness {
use super::*;
pub struct Liveness {
register_map: BTreeMap<NodeRef, Register>,
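// undirected graph over MIR nodes whose live ranges overlap (the
// register-interference graph); adjacent nodes must not share a register.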
inference_graph: petgraph::graph::UnGraph<(), ()>,
}
impl Liveness {
pub fn get_register(&self, node: NodeRef) -> Option<Register> {
self.register_map.get(&node).cloned()
}
pub fn dirty_registers(&self) -> std::collections::btree_map::Values<NodeRef, Register> {
self.register_map.values()
}
pub fn get_scratch_register_at_node(&self, node: NodeRef) -> Option<Register> {
let dirty = self
.inference_graph
.neighbors(node.0.into())
.filter_map(|n| self.register_map.get(&NodeRef(n.index() as u32)))
.cloned()
.collect::<Vec<_>>();
Register::SYSV_SCRATCH_GPRS
.into_iter()
.filter(|c| !dirty.contains(c))
.next()
}
pub fn is_register_in_use_at_node(&self, node: NodeRef, reg: Register) -> bool {
self.inference_graph
.neighbors(node.0.into())
.filter_map(|n| self.register_map.get(&NodeRef(n.index() as u32)))
.find(|&&r| r.parent_reg() == reg.parent_reg())
.is_some()
}
}
pub struct LivenessBuilder<'a> {
mir: &'a Mir,
// tree of (node, referenced_by) pairs.
// a full range for each node is (node, NodeRef::MIN)..(node, NodeRef::MAX)
// QUESTION: do I want to treat every interval in this tree a separate node to color?
// or just the (node, NodeRef::MIN)..(node, NodeRef::MAX) range?
// references: BTreeSet<(NodeRef, NodeRef)>,
inference_graph: petgraph::graph::UnGraph<(), ()>,
// list of preferred colors by nodes, either because they output to some
// register like mul/div or because the write to one of their inputs.
// interesting to consider optimisations like i >>= s being turned into shl
// mem, cl, while i >> s is turned into shl reg, cl ...
// preferred_color: BTreeMap<NodeRef, Register>,
}
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Color {
#[default]
Unassigned,
Tentative(Register),
Final(Register),
}
impl Color {
fn color(self) -> Option<Register> {
match self {
Color::Unassigned => None,
Color::Tentative(color) | Color::Final(color) => Some(color),
}
}
}
impl<'a> LivenessBuilder<'a> {
pub fn new(mir: &'a Mir) -> Self {
let references = Self::build_references(mir);
let intervals = mir.indices().filter_map(|node| {
let interval = references
.range(node.into_reference_range())
.map(|&(_, to)| to)
.reduce(|acc, to| acc.max(to));
interval.map(|max| (node, max))
});
let mut edges = HashSet::<(u32, u32)>::new();
// eprint!("intervals: [");
for (from, to) in intervals {
// eprint!("({from})..({to}), ");
if !mir.is_register(from.0) {
continue;
}
for &(other, _) in references.range(from.exclusive_start()..to.exclusive_end()) {
edges.insert((from.0, other.0));
}
}
// eprintln!("]");
let inference_graph = petgraph::graph::UnGraph::<(), ()>::from_edges(edges.into_iter());
Self {
mir,
inference_graph,
// references,
}
}
pub fn build(self) -> Liveness {
let preferred = self.pre_color_colors();
let Self {
mir,
inference_graph,
..
} = self;
let mut colorizer = Colorizer {
mir,
preferred,
graph: inference_graph,
colors: BTreeMap::new(),
};
// prepass: assign preferred colours for in/out values and specific
// instructions like mul/div which require one operand to be in rax
colorizer.prepass();
// for &node in references.keys().rev() {
// if !self.nodes[node as usize].result_is_register() {
// continue;
// }
// colorizer.color_node(node.into());
// }
let register_map = colorizer.colorise();
let Colorizer { graph, .. } = colorizer;
Liveness {
register_map,
inference_graph: graph,
}
}
fn pre_color_colors(&self) -> BTreeMap<NodeRef, Register> {
/* do i want to find colors from the bottom up?
* - on one hand, consumers with in/outs like mul/div are below and want their inputs in some specific register
* - on the other hand, producers like mul/div are above and want to dictate their output registers
* i think the answer is coloring mul/div first, specifically, then collapsing from there.
*/
let mut want_colors = BTreeMap::<NodeRef, Register>::new();
use Register::*;
// SysV integer argument registers, reversed so that `pop` hands them out in
// order: rdi, rsi, rdx, rcx, r8, r9.
let mut in_colors = [r9, r8, rcx, rdx, rsi, rdi].to_vec();
let mut in_colors_sse = [xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0].to_vec();
for node in self.mir.indices() {
let want_color = match self.mir.get_node(node).0 {
//
Inst::Parameter(ty) => {
if ty.is_floating() {
in_colors_sse.pop()
} else {
in_colors.pop()
}
}
Inst::ShiftLeft(_)
| Inst::ShiftRightSigned(_)
| Inst::ShiftRightUnsigned(_) => Some(rcx),
Inst::Mul(_) | Inst::Div(_) | Inst::DivSigned(_) => Some(rax),
Inst::Rem(_) | Inst::RemSigned(_) => Some(rdx),
Inst::ReturnValue(ty) => {
if ty.is_floating() {
Some(xmm0)
} else {
Some(rax)
}
}
_ => {
if let Some(dst) = self.mir.dst_node(node) {
// check if there is interference
if self
.inference_graph
.find_edge(dst.0.into(), node.0.into())
.is_none()
{
// want the same color as our dst node to avoid copying
want_colors.get(&dst).cloned()
} else {
None
}
} else {
None
}
}
};
if let Some(color) = want_color {
want_colors.insert(node, color);
}
}
want_colors
}
fn build_references(mir: &Mir) -> BTreeSet<(NodeRef, NodeRef)> {
let mut references = BTreeSet::new();
for node in mir.indices() {
Self::reference_node_operands(mir, &mut references, node);
}
references
}
fn reference_node_operands(
mir: &Mir,
references: &mut BTreeSet<(NodeRef, NodeRef)>,
node: NodeRef,
) {
let (inst, data) = mir.get_node(node);
match inst {
Inst::ReturnValue(_)
| Inst::SignExtend(_)
| Inst::ZeroExtend(_)
| Inst::Negate(_)
| Inst::Not(_)
| Inst::IsZero(_)
| Inst::IsEq(_)
| Inst::IsNeq(_)
| Inst::IsGt(_)
| Inst::IsGe(_)
| Inst::IsLt(_)
| Inst::IsLe(_)
| Inst::Load(_)
| Inst::LoadRegister(_) => {
references.insert((data.as_noderef(), node));
}
Inst::Branch(condition) => {
references.insert((NodeRef(condition), node));
}
Inst::Cmp(_)
| Inst::Store(_)
| Inst::Add(_)
| Inst::Sub(_)
| Inst::Mul(_)
| Inst::MulSigned(_)
| Inst::Div(_)
| Inst::DivSigned(_)
| Inst::Rem(_)
| Inst::RemSigned(_)
| Inst::MulSSE(_)
| Inst::DivSSE(_)
| Inst::RemFP(_)
| Inst::BitAnd(_)
| Inst::BitOr(_)
| Inst::BitXOr(_)
| Inst::Phi2(_)
| Inst::ShiftLeft(_)
| Inst::ShiftRightSigned(_)
| Inst::ShiftRightUnsigned(_) => {
let (lhs, rhs) = data.as_binary_noderefs();
references.insert((lhs, node));
references.insert((rhs, node));
}
Inst::GetElementPtr(_) => {
let (src, _) = data.as_binary_noderefs();
references.insert((src, node));
}
// these instructions have no inputs.
// deliberately no wildcard match, so that newly added instructions
// have to be handled explicitly in this function.
Inst::Return
| Inst::Jump
| Inst::Parameter(_)
| Inst::Label
| Inst::ConstantBytes
| Inst::ConstantByte
| Inst::ConstantWord
| Inst::ConstantDWord
| Inst::ConstantQWord
| Inst::ConstantSinglePrecision
| Inst::ConstantDoublePrecision
| Inst::ExternRef
| Inst::Alloca => {}
}
}
}
struct Colorizer<'a> {
mir: &'a Mir,
graph: petgraph::graph::UnGraph<(), ()>,
colors: BTreeMap<NodeRef, Color>,
preferred: BTreeMap<NodeRef, Register>,
}
impl<'a> Colorizer<'a> {
fn node_colors(&self, node: petgraph::graph::NodeIndex) -> &[Register] {
let colors = if self.mir.nodes[node.index()]
.value_type()
.map(|t| t.is_floating())
== Some(true)
{
&Register::SSE[..]
} else {
&Register::GPR[..]
};
colors
}
fn prepass(&mut self) {
// parameters are first in line
let keys = self.preferred.keys().cloned().collect::<Vec<_>>();
for node in keys {
self.precolor_node(node.0.into());
}
}
fn precolor_node(&mut self, node: petgraph::graph::NodeIndex) {
// prepass: assign preferred colours for in/out values and specific
// instructions like mul/div which require one operand to be in rax
let node_u32 = node.index() as u32;
let noderef = NodeRef(node_u32);
// only apply color here if we have a preference
if let Some(preferred_color) = self.preferred.remove(&noderef) {
let mut clique_colors = self
.graph
.neighbors(node)
.filter_map(|n| self.colors.get(&NodeRef(n.index() as u32)).cloned());
if clique_colors
.find(|color| color.color() == Some(preferred_color))
.is_none()
{
self.colors
.insert(noderef, Color::Tentative(preferred_color));
}
};
// .chain(self.node_colors(node).into_iter().cloned());
}
fn color_node(&mut self, node: petgraph::graph::NodeIndex) {
// final pass:
// look at clique colors and prefer to steal colors from
// tentatively colored nodes. this results in preferential
// coloring depending on the order of the prepass.
let node_u32 = node.index() as u32;
let noderef = NodeRef(node_u32);
let clique_colors = self
.graph
.neighbors(node)
.filter_map(|n| self.colors.get(&NodeRef(n.index() as u32)).cloned())
.collect::<BTreeSet<_>>();
let colors = self
.node_colors(node)
.into_iter()
.filter(|&&r| !clique_colors.contains(&Color::Final(r)))
.cloned()
.collect::<BTreeSet<_>>();
// eprintln!("coloring %{node_u32}:");
// eprintln!("\twants: {:?}", self.colors.get(&node_u32));
// eprintln!("\tclique: {clique_colors:?}");
// eprintln!("\tcandidates: {colors:?}");
let color = match self.colors.entry(noderef) {
Entry::Vacant(v) => {
// here we want to first check clique_colors with tentative coloring.
let color = colors
.into_iter()
.find_or_first(|&c| !clique_colors.contains(&Color::Tentative(c)))
.expect("ran out of registers :(");
v.insert(Color::Final(color));
color
}
Entry::Occupied(mut e) => {
// we prefer to steal
variant!(e.get() => &Color::Tentative(reg));
let color = colors
.into_iter()
.find_or_first(|&c| c == reg)
.expect("ran out of registers :(");
e.insert(Color::Final(color));
color
}
};
// if this node has a dst node (an operand that is both source and
// destination), give it our tentative color
if let Some(dst) = self.mir.dst_node(noderef) {
_ = self.colors.try_insert(dst, Color::Tentative(color));
}
// for any Phi(y_1,y_2, y_n) give y_i the color of Phi
// reasonably certain that this will never fail to color all phi nodes the same color.
if let Some(inputs) = self.mir.get_phi_inputs(noderef) {
eprintln!("coloring {inputs:?} {color}");
for node in inputs {
_ = self.colors.insert(node, Color::Tentative(color));
}
}
}
fn colorise(&mut self) -> BTreeMap<NodeRef, Register> {
for node in self.mir.indices() {
if !self.mir.is_register(node.0) {
continue;
}
self.color_node(node.0.into());
}
self.colors
.iter()
.filter_map(|(&node, &c)| match c {
Color::Final(reg) => Some((node, reg)),
_ => None,
})
.collect()
}
}
}
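// The liveness pass above hands register allocation an interference graph
// built with `petgraph::graph::UnGraph::from_edges`: an edge joins two values
// whose live ranges overlap, and joined values must not share a register. A
// minimal sketch of that graph shape, independent of any actual Mir.
#[cfg(test)]
mod interference_graph_checks {
    #[test]
    fn overlapping_values_become_neighbors() {
        // %1 overlaps both %0 and %2, so the edges (0,1) and (1,2) are inserted
        let graph = petgraph::graph::UnGraph::<(), ()>::from_edges([(0u32, 1), (1, 2)]);
        let neighbors: Vec<_> = graph.neighbors(1.into()).map(|n| n.index()).collect();
        assert_eq!(neighbors.len(), 2);
        assert!(neighbors.contains(&0) && neighbors.contains(&2));
        // %0 and %2 never overlap, so they are free to share a register
        assert!(graph.find_edge(0.into(), 2.into()).is_none());
    }
}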
impl Mir {
fn get_phi_inputs(&self, node: NodeRef) -> Option<Vec<NodeRef>> {
let (inst, data) = self.get_node(node);
match inst {
Inst::Phi2(_) => {
let (lhs, rhs) = data.as_binary_noderefs();
Some([lhs, rhs].to_vec())
}
_ => None,
}
}
/// returns the in/out operand, if it exists: example would be (%node = add rax, rcx) -> rax
fn dst_node(&self, node: NodeRef) -> Option<NodeRef> {
// for each node, look at the dst node and see if it has preferred
// color, then also prefer that color.
let (inst, data) = self.get_node(node);
match inst {
Inst::Add(_)
| Inst::Sub(_)
| Inst::MulSigned(_)
| Inst::MulSSE(_)
| Inst::DivSSE(_)
| Inst::BitAnd(_)
| Inst::BitOr(_)
| Inst::BitXOr(_)
| Inst::ShiftLeft(_)
| Inst::ShiftRightSigned(_)
| Inst::ShiftRightUnsigned(_) => {
let (lhs, _) = data.as_binary_noderefs();
Some(lhs)
}
Inst::Not(_) | Inst::Negate(_) => {
let lhs = data.as_noderef();
Some(lhs)
}
Inst::Parameter(_)
| Inst::GetElementPtr(_)
| Inst::LoadRegister(_)
| Inst::Load(_)
| Inst::Label
| Inst::ConstantBytes
| Inst::ConstantByte
| Inst::ConstantWord
| Inst::ConstantDWord
| Inst::ConstantQWord
| Inst::ConstantSinglePrecision
| Inst::ConstantDoublePrecision
| Inst::ExternRef
| Inst::Alloca
| Inst::Jump
| Inst::Return
| Inst::Store(_)
| Inst::ReturnValue(_)
| Inst::SignExtend(_)
| Inst::ZeroExtend(_)
| Inst::Mul(_)
| Inst::Div(_)
| Inst::RemFP(_)
| Inst::DivSigned(_)
| Inst::Rem(_)
| Inst::RemSigned(_)
| Inst::Cmp(_)
| Inst::Branch(_)
| Inst::Phi2(_)
| Inst::IsEq(_)
| Inst::IsNeq(_)
| Inst::IsGt(_)
| Inst::IsLt(_)
| Inst::IsGe(_)
| Inst::IsLe(_)
| Inst::IsZero(_) => None,
}
}
pub fn build_liveness(&self) -> Liveness {
LivenessBuilder::new(self).build()
}
}
use crate::variant;
#[derive(Debug, PartialEq, Eq)]
struct StackMem {
offset: u32,
size: u32,
}
impl StackMem {
fn new(offset: u32, size: u32) -> Self {
Self { offset, size }
}
}
impl core::fmt::Display for StackMem {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{} ptr [rbp - {}]",
Type::from_bytesize_int(self.size),
self.offset
)
}
}
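// The spill-slot operand syntax produced above, pinned down with one concrete
// case: a 4-byte slot 8 bytes below rbp renders as a dword operand.
// Illustrative only.
#[cfg(test)]
mod stack_mem_display_checks {
    use super::*;

    #[test]
    fn four_byte_slot_prints_as_dword() {
        assert_eq!(StackMem::new(8, 4).to_string(), "dword ptr [rbp - 8]");
    }
}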
#[derive(Debug, PartialEq, Eq)]
enum ImmRegMem {
Byte(u8),
Word(u16),
DWord(u32),
QWord(u64),
Mem(StackMem),
Rip(RipRelative),
Reg(Register),
}
impl ImmRegMem {
fn is_floating(&self) -> bool {
match self {
ImmRegMem::Reg(reg) => reg.is_sse(),
_ => false,
}
}
fn byte_width(&self) -> u32 {
match self {
ImmRegMem::Byte(_) => 1,
ImmRegMem::Word(_) => 2,
ImmRegMem::DWord(_) => 4,
ImmRegMem::QWord(_) => 8,
ImmRegMem::Mem(mem) => mem.size,
ImmRegMem::Rip(rip) => rip.ty().bytes(),
ImmRegMem::Reg(reg) => reg.byte_size(),
}
}
fn occupy_same_register(&self, reg: Register) -> bool {
match self {
&ImmRegMem::Reg(r) => r.parent_reg() == reg.parent_reg(),
_ => false,
}
}
}
impl From<Register> for ImmRegMem {
fn from(value: Register) -> Self {
Self::Reg(value)
}
}
impl core::fmt::Display for ImmRegMem {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use ImmRegMem::*;
match self {
Byte(v) => write!(f, "0x{v:x}"),
Word(v) => write!(f, "0x{v:x}"),
DWord(v) => write!(f, "0x{v:x}"),
QWord(v) => write!(f, "0x{v:x}"),
Rip(rip) => write!(f, "{rip}"),
Mem(mem) => write!(f, "{mem}"),
Reg(reg) => write!(f, "{reg}"),
}
}
}
#[derive(Debug, PartialEq, Eq)]
enum RipRelative {
Label(Type, String),
#[allow(dead_code)]
Offset(Type, i32),
}
impl RipRelative {
fn ty(&self) -> Type {
match self {
RipRelative::Label(ty, _) => *ty,
RipRelative::Offset(ty, _) => *ty,
}
}
}
impl core::fmt::Display for RipRelative {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RipRelative::Label(ty, label) => write!(f, "{} ptr [rip + {label}]", ty.int_repr()),
RipRelative::Offset(ty, offset) => {
write!(f, "{} ptr [rip + {offset}]", ty.int_repr())
}
}
}
}
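// Why float operands go through `Type::int_repr`: rip-relative float constants
// are addressed with an integer-width memory operand, so a single-precision
// label renders as a dword pointer. The label below is an arbitrary example.
#[cfg(test)]
mod rip_relative_display_checks {
    use super::*;

    #[test]
    fn float_labels_render_with_integer_widths() {
        let rip = RipRelative::Label(Type::SinglePrecision, ".L0_cafe".to_string());
        assert_eq!(rip.to_string(), "dword ptr [rip + .L0_cafe]");
    }
}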
#[derive(Debug)]
pub struct Function {
name: StringsIndex,
constants: BTreeMap<usize, String>,
branches: BTreeMap<NodeRef, String>,
current_branch: NodeRef,
stack_offset: u32,
dirty_registers: BTreeSet<Register>,
}
// predefined constants: 0F32, 0F64, NEG1F32, NEG1F64
impl Function {
fn new(name: StringsIndex) -> Self {
let current_branch = NodeRef::MIN;
let branches = BTreeMap::from([(current_branch, String::new())]);
Self {
name,
constants: BTreeMap::new(),
branches,
current_branch,
stack_offset: 0,
dirty_registers: BTreeSet::new(),
}
}
#[allow(dead_code)]
fn dirty_register(&mut self, reg: Register) {
self.dirty_registers.insert(reg);
}
fn create_new_branch(&mut self, node: NodeRef) {
self.current_branch = node;
self.branches.insert(node, String::new());
}
fn get_constant_label(&self, i: usize) -> String {
// TODO: make this use the string table to retrieve the function name,
// or intern constants.
let mut hasher = std::hash::DefaultHasher::new();
self.constants.get(&i).unwrap().hash(&mut hasher);
let constant = hasher.finish() as u16;
format!(".{i}_{:x}", constant)
}
fn add_constant(&mut self, i: usize, content: String) {
_ = self.constants.try_insert(i, content);
}
fn add_constant_from_inst_and_data(
&mut self,
i: usize,
inst: Inst,
data: Data,
strings: &StringTable,
) {
match inst {
Inst::ConstantBytes => {
_ = strings;
todo!()
}
Inst::ConstantByte => {
self.add_constant(i, format!(".byte {}", data.as_imm8()));
}
Inst::ConstantWord => {
self.add_constant(i, format!(".2byte {}", data.as_imm16()));
}
Inst::ConstantSinglePrecision | Inst::ConstantDWord => {
self.add_constant(i, format!(".4byte {}", data.as_imm32()));
}
Inst::ConstantDoublePrecision | Inst::ConstantQWord => {
self.add_constant(i, format!(".8byte {}", data.as_imm64()));
}
_ => unreachable!(),
}
}
fn alloca(&mut self, size: u32, align: u32) -> u32 {
self.stack_offset += size;
self.stack_offset = self.stack_offset.next_multiple_of(align);
self.stack_offset
}
fn current_branch(&mut self) -> &mut String {
self.branches.get_mut(&self.current_branch).unwrap()
}
pub fn finish<W: core::fmt::Write>(
mut self,
w: &mut W,
strings: &StringTable,
) -> core::fmt::Result {
let name = strings.get_str(self.name).to_owned();
writeln!(w, ".globl {name}")?;
writeln!(w, "{name}:")?;
let saved_registers = self
.dirty_registers
.intersection(&BTreeSet::from_iter(Register::SYSV_CALLEE_SAVED))
.cloned()
.collect::<Vec<_>>();
for reg in saved_registers.iter() {
writeln!(w, "push {reg}")?;
}
writeln!(w, "push rbp")?;
writeln!(w, "mov rbp, rsp")?;
writeln!(w, "sub rsp, {}", self.stack_offset)?;
write!(w, "{}", self.branches.remove(&NodeRef::MIN).unwrap())?;
for (branch, content) in &self.branches {
if name != "main" {
writeln!(w, "{name}_L{}:", branch.0)?;
write!(w, "{content}")?;
}
}
writeln!(w, "{name}__epilogue:")?;
writeln!(w, "mov rsp, rbp")?;
writeln!(w, "pop rbp")?;
for reg in saved_registers.iter().rev() {
writeln!(w, "pop {reg}")?;
}
writeln!(w, "ret")?;
Ok(())
}
}
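// `Function::alloca` above grows the running stack offset by `size` and then
// rounds it up to `align`; the result is the slot's `[rbp - offset]` distance.
// A standalone mirror of that arithmetic, illustrative only.
#[cfg(test)]
mod alloca_offset_checks {
    // same two steps as `Function::alloca`, without needing a `Function`
    fn alloca(stack_offset: &mut u32, size: u32, align: u32) -> u32 {
        *stack_offset += size;
        *stack_offset = stack_offset.next_multiple_of(align);
        *stack_offset
    }

    #[test]
    fn offsets_grow_and_stay_aligned() {
        let mut offset = 0u32;
        assert_eq!(alloca(&mut offset, 1, 1), 1); // byte slot at rbp - 1
        assert_eq!(alloca(&mut offset, 4, 4), 8); // dword slot rounded up to rbp - 8
        assert_eq!(alloca(&mut offset, 8, 8), 16); // qword slot at rbp - 16
    }
}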
#[allow(dead_code, unused)]
impl Mir {
fn node_as_operand(
&self,
liveness: &Liveness,
mapping: &BTreeMap<usize, (u32, u32)>,
func: &mut Function,
strings: &StringTable,
node: u32,
) -> ImmRegMem {
let inst = self.nodes[node as usize];
let data = self.data[node as usize];
match inst {
Inst::Label => todo!(),
Inst::ConstantBytes => todo!(),
Inst::ConstantByte => ImmRegMem::Byte(data.as_imm8()),
Inst::ConstantWord => ImmRegMem::Word(data.as_imm16()),
Inst::ConstantDWord => ImmRegMem::DWord(data.as_imm32()),
Inst::ConstantQWord => ImmRegMem::QWord(data.as_imm64()),
Inst::ConstantSinglePrecision => {
func.add_constant_from_inst_and_data(
node as usize,
self.nodes[node as usize],
self.data[node as usize],
strings,
);
let label = func.get_constant_label(node as usize);
ImmRegMem::Rip(RipRelative::Label(Type::DWord, label))
}
Inst::ConstantDoublePrecision => {
func.add_constant_from_inst_and_data(
node as usize,
self.nodes[node as usize],
self.data[node as usize],
strings,
);
let label = func.get_constant_label(node as usize);
ImmRegMem::Rip(RipRelative::Label(Type::QWord, label))
}
Inst::GetElementPtr(ty) => liveness.get_register(node.into()).unwrap().into(),
Inst::Parameter(ty)
| Inst::Phi2(ty)
| Inst::Add(ty)
| Inst::Sub(ty)
| Inst::Mul(ty)
| Inst::MulSigned(ty)
| Inst::Div(ty)
| Inst::DivSigned(ty)
| Inst::Rem(ty)
| Inst::RemSigned(ty)
| Inst::MulSSE(ty)
| Inst::DivSSE(ty)
| Inst::RemFP(ty)
| Inst::BitAnd(ty)
| Inst::BitOr(ty)
| Inst::BitXOr(ty)
| Inst::Negate(ty)
| Inst::Not(ty)
| Inst::ShiftLeft(ty)
| Inst::ShiftRightSigned(ty)
| Inst::ShiftRightUnsigned(ty)
| Inst::SignExtend(ty)
| Inst::ZeroExtend(ty)
| Inst::IsZero(ty)
| Inst::Load(ty)
| Inst::LoadRegister(ty) => ty
.register_width(liveness.get_register(node.into()).unwrap())
.into(),
Inst::IsEq(_)
| Inst::IsGt(_)
| Inst::IsLt(_)
| Inst::IsGe(_)
| Inst::IsLe(_)
| Inst::IsNeq(_) => liveness
.get_register(node.into())
.unwrap()
.into_byte()
.into(),
Inst::Alloca => {
let (offset, size) = *mapping.get(&(node as usize)).unwrap();
ImmRegMem::Mem(StackMem::new(offset, size))
}
Inst::ExternRef => todo!(),
_ => {
unreachable!()
}
}
}
pub fn assemble(&self, strings: &StringTable) -> Result<String, core::fmt::Error> {
use core::fmt::Write;
// mapping of (i, (stack_offset, bytes)) for local stack variables
let mut mapping = BTreeMap::<usize, (u32, u32)>::new();
let mut func = Function::new(self.name);
let name = strings.get_str(self.name).to_owned();
let liveness = self.build_liveness();
func.dirty_registers.extend(liveness.dirty_registers());
let mut float_params = 0;
let mut int_params = 0;
for i in 0..self.nodes.len() {
let node = i as u32;
let inst = self.nodes[i];
let data = self.data[i];
self.render_node(func.current_branch(), strings, &liveness, node);
match inst {
Inst::Label => {
func.create_new_branch(NodeRef(node));
}
Inst::ConstantBytes
| Inst::ConstantByte
| Inst::ConstantWord
| Inst::ConstantDWord
| Inst::ConstantQWord => {}
Inst::ConstantSinglePrecision => {
let bits = data.as_imm32();
func.add_constant(i, format!(".long {bits}"));
}
Inst::ConstantDoublePrecision => {
let bits = data.as_imm64();
func.add_constant(i, format!(".quad {bits}"));
}
Inst::LoadRegister(ty) => {
let src = data.as_node();
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let src = self.node_as_operand(&liveness, &mapping, &mut func, strings, src);
if ty.is_floating() {
match src {
ImmRegMem::Byte(_)
| ImmRegMem::Word(_)
| ImmRegMem::DWord(_)
| ImmRegMem::QWord(_) => {
let mut spill_rax = false;
let scratch = liveness
.get_scratch_register_at_node(node.into())
.unwrap_or_else(|| {
spill_rax = true;
Register::rax
});
if spill_rax {
writeln!(func.current_branch(), "push rax")?;
}
writeln!(
func.current_branch(),
"mov {}, {src}",
ty.register_width(scratch)
)?;
writeln!(
func.current_branch(),
"movd {dst}, {}",
ty.register_width(scratch)
)?;
if spill_rax {
writeln!(func.current_branch(), "pop rax")?;
}
}
ImmRegMem::Mem(_) | ImmRegMem::Rip(_) => {
writeln!(func.current_branch(), "movss {dst}, {src}",)?;
}
ImmRegMem::Reg(_) => {
writeln!(func.current_branch(), "movd {dst}, {src}",)?;
}
}
} else {
writeln!(func.current_branch(), "mov {dst}, {src}",)?;
}
}
Inst::ExternRef => todo!(),
Inst::Alloca => {
let (size, align) = data.as_binary();
let offset = func.alloca(size, align);
mapping.insert(i, (offset, size));
}
Inst::Load(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let src = data.as_node();
let src = self.node_as_operand(&liveness, &mapping, &mut func, strings, src);
match src {
ImmRegMem::Reg(_) => {
writeln!(func.current_branch(), "mov {}, {ty} ptr [{}]", dst, src,)?;
}
ImmRegMem::Mem(ref mem) => {
let mut spill_rax = false;
let scratch = liveness
.get_scratch_register_at_node(node.into())
.unwrap_or_else(|| {
spill_rax = true;
Register::rax
});
if spill_rax {
writeln!(func.current_branch(), "push rax")?;
}
writeln!(func.current_branch(), "mov {}, {}", scratch, src)?;
writeln!(func.current_branch(), "mov {}, [{}]", dst, scratch)?;
if spill_rax {
writeln!(func.current_branch(), "pop rax")?;
}
}
_ => {}
}
// stuff
// branch.push((Mnemonic::mov, Operands::One(Operand::imm32(size))))
}
Inst::Store(ty) => {
let (src, dst) = data.as_binary();
let src = self.node_as_operand(&liveness, &mapping, &mut func, strings, src);
let dst = self.node_as_operand(&liveness, &mapping, &mut func, strings, dst);
if src.is_floating() {
writeln!(func.current_branch(), "movss {dst}, {src}")?;
} else {
writeln!(func.current_branch(), "mov {dst}, {src}")?;
}
}
Inst::GetElementPtr(ty) => {
let dst = liveness.get_register(node.into()).unwrap();
let (src, idx) = data.as_binary();
let src = self.node_as_operand(&liveness, &mapping, &mut func, strings, src);
if let ImmRegMem::Mem(_) = &src {
writeln!(func.current_branch(), "lea {dst}, {src}",)?;
}
let offset = idx * ty.bytes();
if offset != 0 {
writeln!(func.current_branch(), "lea {dst}, [{dst} + {offset}]",)?;
}
}
Inst::Parameter(ty) => {
if ty.is_floating() {
float_params += 1;
if float_params > 4 {
eprintln!("more than 4 int params, we dont handle stack params yet!");
}
} else {
int_params += 1;
if int_params > 4 {
eprintln!("more than 4 float params, we dont handle stack params yet!");
}
}
}
Inst::Add(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
if ty.is_floating() {
writeln!(func.current_branch(), "addss {lhs}, {rhs}")?;
if !lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "movss {dst}, {lhs}")?;
}
} else {
writeln!(func.current_branch(), "add {lhs}, {rhs}")?;
if !lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
}
Inst::Sub(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
if ty.is_floating() {
writeln!(func.current_branch(), "subss {lhs}, {rhs}")?;
if !lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "movss {dst}, {lhs}")?;
}
} else {
writeln!(func.current_branch(), "sub {lhs}, {rhs}")?;
if !lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
}
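                // Unsigned multiply uses the one-operand mul, which reads rax and clobbers
                // rdx:rax, so both registers are spilled around it when they hold live
                // values; signed multiply below uses the two-operand imul instead.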
Inst::Mul(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
let spill_rax = liveness.is_register_in_use_at_node(node.into(), Register::rax)
&& !(lhs.occupy_same_register(Register::rax)
|| rhs.occupy_same_register(Register::rax)
|| dst.parent_reg() == Register::rax);
let spill_rdx = liveness.is_register_in_use_at_node(node.into(), Register::rdx)
&& !(lhs.occupy_same_register(Register::rdx)
|| rhs.occupy_same_register(Register::rdx)
|| dst.parent_reg() == Register::rdx);
if spill_rax {
writeln!(func.current_branch(), "push rax")?;
}
if spill_rdx {
writeln!(func.current_branch(), "push rdx")?;
}
if !lhs.occupy_same_register(Register::rax) {
writeln!(
func.current_branch(),
"mov {}, {lhs}",
ty.register_width(Register::rax)
)?;
}
writeln!(func.current_branch(), "mul {rhs}")?;
if dst.parent_reg() != Register::rax {
writeln!(
func.current_branch(),
"mov {dst}, {}",
ty.register_width(Register::rax)
)?;
}
if spill_rdx {
writeln!(func.current_branch(), "pop rdx")?;
}
if spill_rax {
writeln!(func.current_branch(), "pop rax")?;
}
}
Inst::MulSigned(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
let spill_rax = liveness.is_register_in_use_at_node(node.into(), Register::rax)
&& !(lhs.occupy_same_register(Register::rax)
|| rhs.occupy_same_register(Register::rax)
|| dst.parent_reg() == Register::rax);
let spill_rdx = liveness.is_register_in_use_at_node(node.into(), Register::rdx)
&& !(lhs.occupy_same_register(Register::rdx)
|| rhs.occupy_same_register(Register::rdx)
|| dst.parent_reg() == Register::rdx);
if spill_rax {
writeln!(func.current_branch(), "push rax")?;
}
if spill_rdx {
writeln!(func.current_branch(), "push rdx")?;
}
writeln!(func.current_branch(), "imul {lhs}, {rhs}")?;
if !lhs.occupy_same_register(dst.parent_reg()) {
writeln!(func.current_branch(), "mov {dst}, {lhs}",)?;
}
if spill_rdx {
writeln!(func.current_branch(), "pop rdx")?;
}
if spill_rax {
writeln!(func.current_branch(), "pop rax")?;
}
}
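                // Unsigned divide: div takes its dividend in rdx:rax and leaves the
                // quotient in rax, so lhs is staged in rax and live rax/rdx are spilled.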
Inst::Div(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
let spill_rax = liveness.is_register_in_use_at_node(node.into(), Register::rax)
&& !(lhs.occupy_same_register(Register::rax)
|| rhs.occupy_same_register(Register::rax)
|| dst.parent_reg() == Register::rax);
let spill_rdx = liveness.is_register_in_use_at_node(node.into(), Register::rdx)
&& !(lhs.occupy_same_register(Register::rdx)
|| rhs.occupy_same_register(Register::rdx)
|| dst.parent_reg() == Register::rdx);
if spill_rax {
writeln!(func.current_branch(), "push rax")?;
}
if spill_rdx {
writeln!(func.current_branch(), "push rdx")?;
}
                    if !lhs.occupy_same_register(Register::rax) {
                        writeln!(
                            func.current_branch(),
                            "mov {}, {lhs}",
                            ty.register_width(Register::rax)
                        )?;
                    }
                    // div reads its dividend from rdx:rax, so zero rdx first
                    // (unless rhs itself is allocated in rdx)
                    if !rhs.occupy_same_register(Register::rdx) {
                        writeln!(func.current_branch(), "xor edx, edx")?;
                    }
                    writeln!(func.current_branch(), "div {rhs}")?;
if dst.parent_reg() != Register::rax {
writeln!(
func.current_branch(),
"mov {dst}, {}",
ty.register_width(Register::rax)
)?;
}
if spill_rdx {
writeln!(func.current_branch(), "pop rdx")?;
}
if spill_rax {
writeln!(func.current_branch(), "pop rax")?;
}
}
Inst::DivSigned(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
let spill_rax = liveness.is_register_in_use_at_node(node.into(), Register::rax)
&& !(lhs.occupy_same_register(Register::rax)
|| rhs.occupy_same_register(Register::rax)
|| dst.parent_reg() == Register::rax);
let spill_rdx = liveness.is_register_in_use_at_node(node.into(), Register::rdx)
&& !(lhs.occupy_same_register(Register::rdx)
|| rhs.occupy_same_register(Register::rdx)
|| dst.parent_reg() == Register::rdx);
if spill_rax {
writeln!(func.current_branch(), "push rax")?;
}
if spill_rdx {
writeln!(func.current_branch(), "push rdx")?;
}
                    if !lhs.occupy_same_register(Register::rax) {
                        writeln!(
                            func.current_branch(),
                            "mov {}, {lhs}",
                            ty.register_width(Register::rax)
                        )?;
                    }
                    // idiv expects a sign-extended dividend in rdx:rax (ax for byte
                    // operands), so widen it first unless rhs itself lives in rdx
                    if !rhs.occupy_same_register(Register::rdx) {
                        let widen = match ty {
                            Type::Byte => "cbw",
                            Type::Word => "cwd",
                            Type::DWord => "cdq",
                            _ => "cqo",
                        };
                        writeln!(func.current_branch(), "{widen}")?;
                    }
                    writeln!(func.current_branch(), "idiv {rhs}")?;
if dst.parent_reg() != Register::rax {
writeln!(
func.current_branch(),
"mov {dst}, {}",
ty.register_width(Register::rax)
)?;
}
if spill_rdx {
writeln!(func.current_branch(), "pop rdx")?;
}
if spill_rax {
writeln!(func.current_branch(), "pop rax")?;
}
}
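                // Unsigned remainder: same setup as Div, but the result is read from rdx.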
Inst::Rem(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
let spill_rax = liveness.is_register_in_use_at_node(node.into(), Register::rax)
&& !(lhs.occupy_same_register(Register::rax)
|| rhs.occupy_same_register(Register::rax)
|| dst.parent_reg() == Register::rax);
let spill_rdx = liveness.is_register_in_use_at_node(node.into(), Register::rdx)
&& !(lhs.occupy_same_register(Register::rdx)
|| rhs.occupy_same_register(Register::rdx)
|| dst.parent_reg() == Register::rdx);
if spill_rax {
writeln!(func.current_branch(), "push rax")?;
}
if spill_rdx {
writeln!(func.current_branch(), "push rdx")?;
}
                    if !lhs.occupy_same_register(Register::rax) {
                        writeln!(
                            func.current_branch(),
                            "mov {}, {lhs}",
                            ty.register_width(Register::rax)
                        )?;
                    }
                    // div reads its dividend from rdx:rax, so zero rdx first
                    // (unless rhs itself is allocated in rdx)
                    if !rhs.occupy_same_register(Register::rdx) {
                        writeln!(func.current_branch(), "xor edx, edx")?;
                    }
                    writeln!(func.current_branch(), "div {rhs}")?;
if dst.parent_reg() != Register::rdx {
writeln!(
func.current_branch(),
"mov {dst}, {}",
ty.register_width(Register::rdx)
)?;
}
if spill_rdx {
writeln!(func.current_branch(), "pop rdx")?;
}
if spill_rax {
writeln!(func.current_branch(), "pop rax")?;
}
}
Inst::RemSigned(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
let spill_rax = liveness.is_register_in_use_at_node(node.into(), Register::rax)
&& !(lhs.occupy_same_register(Register::rax)
|| rhs.occupy_same_register(Register::rax)
|| dst.parent_reg() == Register::rax);
let spill_rdx = liveness.is_register_in_use_at_node(node.into(), Register::rdx)
&& !(lhs.occupy_same_register(Register::rdx)
|| rhs.occupy_same_register(Register::rdx)
|| dst.parent_reg() == Register::rdx);
if spill_rax {
writeln!(func.current_branch(), "push rax")?;
}
if spill_rdx {
writeln!(func.current_branch(), "push rdx")?;
}
                    if !lhs.occupy_same_register(Register::rax) {
                        writeln!(
                            func.current_branch(),
                            "mov {}, {lhs}",
                            ty.register_width(Register::rax)
                        )?;
                    }
                    // idiv expects a sign-extended dividend in rdx:rax (ax for byte
                    // operands), so widen it first unless rhs itself lives in rdx
                    if !rhs.occupy_same_register(Register::rdx) {
                        let widen = match ty {
                            Type::Byte => "cbw",
                            Type::Word => "cwd",
                            Type::DWord => "cdq",
                            _ => "cqo",
                        };
                        writeln!(func.current_branch(), "{widen}")?;
                    }
                    writeln!(func.current_branch(), "idiv {rhs}")?;
if dst.parent_reg() != Register::rdx {
writeln!(
func.current_branch(),
"mov {dst}, {}",
ty.register_width(Register::rdx)
)?;
}
if spill_rdx {
writeln!(func.current_branch(), "pop rdx")?;
}
if spill_rax {
writeln!(func.current_branch(), "pop rax")?;
}
}
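                // SSE float multiply/divide follow the same two-address pattern as the
                // integer ops; note that single-precision mnemonics (mulss/divss/movss)
                // are currently emitted for every float width.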
Inst::MulSSE(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
writeln!(func.current_branch(), "mulss {lhs}, {rhs}")?;
if !lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "movss {dst}, {lhs}")?;
}
}
Inst::DivSSE(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
writeln!(func.current_branch(), "divss {lhs}, {rhs}")?;
if !lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "movss {dst}, {lhs}")?;
}
}
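                // Float remainder goes through the x87 unit: spill both operands to the
                // stack, compute the partial remainder with fprem, then move the result
                // back into an SSE register.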
Inst::RemFP(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
let ty = ty.int_repr();
let size = ty.bytes();
writeln!(func.current_branch(), "sub rsp, 0x{:x}", size * 2)?;
writeln!(func.current_branch(), "movss {ty} ptr [rsp], {lhs}")?;
writeln!(
func.current_branch(),
"movss {ty} ptr [rsp + {size}], {rhs}"
)?;
writeln!(func.current_branch(), "fld {ty} ptr [rsp + {size}]")?;
writeln!(func.current_branch(), "fld {ty} ptr [rsp]")?;
writeln!(func.current_branch(), "fprem")?;
writeln!(func.current_branch(), "fst {ty} ptr [rsp]")?;
writeln!(func.current_branch(), "movss {dst}, {ty} ptr [rsp]")?;
writeln!(func.current_branch(), "add rsp, 0x{:x}", size * 2)?;
}
Inst::BitAnd(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
writeln!(func.current_branch(), "and {lhs}, {rhs}")?;
if !lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
Inst::BitOr(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
writeln!(func.current_branch(), "or {lhs}, {rhs}")?;
if !lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
Inst::BitXOr(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
writeln!(func.current_branch(), "xor {lhs}, {rhs}")?;
if !lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
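                // Variable shift counts must be in cl, so the shift ops shuffle the count
                // into rcx (spilling it if live) and detour through rax when lhs itself is
                // allocated in rcx.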
Inst::ShiftLeft(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
let lhs_is_rcx = lhs.occupy_same_register(Register::rcx);
let rhs_is_rcx = rhs.occupy_same_register(Register::rcx);
let dst_is_rcx = dst.parent_reg() == Register::rcx;
match rhs {
ImmRegMem::Byte(v) => {
writeln!(func.current_branch(), "shl {lhs}, {}", v as u8)?;
}
ImmRegMem::DWord(v) => {
writeln!(func.current_branch(), "shl {lhs}, {}", v as u8)?;
}
ImmRegMem::QWord(v) => {
writeln!(func.current_branch(), "shl {lhs}, {}", v as u8)?;
}
ImmRegMem::Word(v) => {
writeln!(func.current_branch(), "shl {lhs}, {}", v as u8)?;
}
ImmRegMem::Reg(reg) => {
// reg needs to be moved to CL
// if lhs is in rcx, lhs needs to move to rax and we spill rax temporarily
                            // if neither lhs nor rhs nor dst is rcx, spill rcx temporarily
let spill_rcx = liveness
.is_register_in_use_at_node(node.into(), Register::rcx)
&& !(lhs_is_rcx || rhs_is_rcx || dst_is_rcx);
if spill_rcx {
writeln!(func.current_branch(), "push rcx")?;
}
if !rhs_is_rcx {
writeln!(
func.current_branch(),
"mov {}, {rhs}",
Type::from_bytesize_int(rhs.byte_width())
.register_width(Register::rcx)
)?;
}
if lhs_is_rcx {
if liveness.is_register_in_use_at_node(node.into(), Register::rax) {
writeln!(func.current_branch(), "push rax")?;
}
writeln!(func.current_branch(), "test rax,rax")?;
writeln!(
func.current_branch(),
"mov {}, {lhs}",
Type::from_bytesize_int(lhs.byte_width())
.register_width(Register::rax)
)?;
writeln!(func.current_branch(), "shl rax, cl")?;
writeln!(func.current_branch(), "mov {dst}, rax")?;
if liveness.is_register_in_use_at_node(node.into(), Register::rax) {
writeln!(func.current_branch(), "pop rax")?;
}
} else {
writeln!(func.current_branch(), "shl {lhs}, cl")?;
if lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
if spill_rcx {
writeln!(func.current_branch(), "pop rcx")?;
}
}
_ => unreachable!(),
}
if !lhs.occupy_same_register(dst) && !lhs_is_rcx {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
Inst::ShiftRightSigned(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
let lhs_is_rcx = lhs.occupy_same_register(Register::rcx);
let rhs_is_rcx = rhs.occupy_same_register(Register::rcx);
let dst_is_rcx = dst.parent_reg() == Register::rcx;
match rhs {
ImmRegMem::Byte(v) => {
writeln!(func.current_branch(), "sar {lhs}, {}", v as u8)?;
}
ImmRegMem::DWord(v) => {
writeln!(func.current_branch(), "sar {lhs}, {}", v as u8)?;
}
ImmRegMem::QWord(v) => {
writeln!(func.current_branch(), "sar {lhs}, {}", v as u8)?;
}
ImmRegMem::Word(v) => {
writeln!(func.current_branch(), "sar {lhs}, {}", v as u8)?;
}
ImmRegMem::Reg(reg) => {
// reg needs to be moved to CL
// if lhs is in rcx, lhs needs to move to rax and we spill rax temporarily
                            // if neither lhs nor rhs nor dst is rcx, spill rcx temporarily
let spill_rcx = liveness
.is_register_in_use_at_node(node.into(), Register::rcx)
&& !(lhs_is_rcx || rhs_is_rcx || dst_is_rcx);
if spill_rcx {
writeln!(func.current_branch(), "push rcx")?;
}
if !rhs_is_rcx {
writeln!(
func.current_branch(),
"mov {}, {rhs}",
Type::from_bytesize_int(rhs.byte_width())
.register_width(Register::rcx)
)?;
}
if lhs_is_rcx {
if liveness.is_register_in_use_at_node(node.into(), Register::rax) {
writeln!(func.current_branch(), "push rax")?;
}
writeln!(func.current_branch(), "test rax,rax")?;
writeln!(
func.current_branch(),
"mov {}, {lhs}",
Type::from_bytesize_int(lhs.byte_width())
.register_width(Register::rax)
)?;
writeln!(func.current_branch(), "sar rax, cl")?;
writeln!(func.current_branch(), "mov {dst}, rax")?;
if liveness.is_register_in_use_at_node(node.into(), Register::rax) {
writeln!(func.current_branch(), "pop rax")?;
}
} else {
writeln!(func.current_branch(), "sar {lhs}, cl")?;
if lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
if spill_rcx {
writeln!(func.current_branch(), "pop rcx")?;
}
}
_ => unreachable!(),
}
if !lhs.occupy_same_register(dst) && !lhs_is_rcx {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
Inst::ShiftRightUnsigned(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
let lhs_is_rcx = lhs.occupy_same_register(Register::rcx);
let rhs_is_rcx = rhs.occupy_same_register(Register::rcx);
let dst_is_rcx = dst.parent_reg() == Register::rcx;
match rhs {
ImmRegMem::Byte(v) => {
writeln!(func.current_branch(), "shr {lhs}, {}", v as u8)?;
}
ImmRegMem::DWord(v) => {
writeln!(func.current_branch(), "shr {lhs}, {}", v as u8)?;
}
ImmRegMem::QWord(v) => {
writeln!(func.current_branch(), "shr {lhs}, {}", v as u8)?;
}
ImmRegMem::Word(v) => {
writeln!(func.current_branch(), "shr {lhs}, {}", v as u8)?;
}
ImmRegMem::Reg(reg) => {
// reg needs to be moved to CL
// if lhs is in rcx, lhs needs to move to rax and we spill rax temporarily
                            // if neither lhs nor rhs nor dst is rcx, spill rcx temporarily
let spill_rcx = liveness
.is_register_in_use_at_node(node.into(), Register::rcx)
&& !(lhs_is_rcx || rhs_is_rcx || dst_is_rcx);
if spill_rcx {
writeln!(func.current_branch(), "push rcx")?;
}
if !rhs_is_rcx {
writeln!(
func.current_branch(),
"mov {}, {rhs}",
Type::from_bytesize_int(rhs.byte_width())
.register_width(Register::rcx)
)?;
}
if lhs_is_rcx {
if liveness.is_register_in_use_at_node(node.into(), Register::rax) {
writeln!(func.current_branch(), "push rax")?;
}
writeln!(func.current_branch(), "test rax,rax")?;
writeln!(
func.current_branch(),
"mov {}, {lhs}",
Type::from_bytesize_int(lhs.byte_width())
.register_width(Register::rax)
)?;
writeln!(func.current_branch(), "shr rax, cl")?;
writeln!(func.current_branch(), "mov {dst}, rax")?;
if liveness.is_register_in_use_at_node(node.into(), Register::rax) {
writeln!(func.current_branch(), "pop rax")?;
}
} else {
writeln!(func.current_branch(), "shr {lhs}, cl")?;
if lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
if spill_rcx {
writeln!(func.current_branch(), "pop rcx")?;
}
}
_ => unreachable!(),
}
if !lhs.occupy_same_register(dst) && !lhs_is_rcx {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
Inst::Not(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let lhs = data.as_node();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
writeln!(func.current_branch(), "not {lhs}")?;
if !lhs.occupy_same_register(dst) {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
Inst::Negate(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let lhs = data.as_node();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
if ty.is_floating() {
writeln!(func.current_branch(), "mulss {lhs}, [rip + .NEG1F32]")?;
if lhs != ImmRegMem::Reg(dst) {
writeln!(func.current_branch(), "movss {dst}, {lhs}")?;
}
} else {
writeln!(func.current_branch(), "neg {lhs}")?;
if lhs != ImmRegMem::Reg(dst) {
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
}
}
}
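                // SignExtend: widen with movsx/movsxd chosen by the source width;
                // immediates are first materialized in the destination at their own width.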
Inst::SignExtend(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let lhs = data.as_node();
let lhs_ty = self.type_of_node(lhs).unwrap();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
if ty == lhs_ty {
writeln!(func.current_branch(), "test {dst}, {dst}")?;
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
} else {
match lhs {
ImmRegMem::Byte(v) => {
writeln!(func.current_branch(), "mov {}, {v}", dst.into_byte())?;
writeln!(
func.current_branch(),
"movsx {dst}, {}",
dst.into_byte()
)?;
}
                        ImmRegMem::Word(v) => {
                            writeln!(func.current_branch(), "mov {}, {v}", dst.into_word())?;
                            writeln!(
                                func.current_branch(),
                                "movsx {dst}, {}",
                                dst.into_word()
                            )?;
                        }
                        ImmRegMem::DWord(v) => {
                            writeln!(func.current_branch(), "mov {}, {v}", dst.into_dword())?;
                            writeln!(
                                func.current_branch(),
                                "movsxd {dst}, {}",
                                dst.into_dword()
                            )?;
                        }
ImmRegMem::QWord(v) => {
writeln!(func.current_branch(), "mov {},{v}", dst.into_qword())?;
}
ImmRegMem::Mem(mem) => match lhs_ty {
Type::Byte | Type::Word => {
writeln!(func.current_branch(), "movsx {dst}, {mem}")?;
}
Type::DWord => {
writeln!(func.current_branch(), "movsxd {dst}, {mem}")?;
}
Type::QWord => {
writeln!(func.current_branch(), "movs {dst}, {mem}")?;
}
_ => {
panic!("cannot sign-extend a floating register")
}
},
ImmRegMem::Reg(reg) => match lhs_ty {
Type::Byte | Type::Word => {
writeln!(func.current_branch(), "movsx {dst}, {reg}")?;
}
Type::DWord => {
writeln!(func.current_branch(), "movsxd {dst}, {reg}")?;
}
Type::QWord => {
writeln!(func.current_branch(), "mov {dst}, {reg}")?;
}
_ => {
panic!("cannot sign-extend a floating register")
}
},
_ => unreachable!(),
}
}
}
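                // ZeroExtend: widen by moving the source into the destination register at
                // the source's own width.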
Inst::ZeroExtend(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let lhs = data.as_node();
let lhs_ty = self.type_of_node(lhs).unwrap();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
if ty == lhs_ty {
writeln!(func.current_branch(), "test {dst}, {dst}")?;
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
} else {
writeln!(func.current_branch(), "test {0}, {0}", dst.parent_reg())?;
match lhs {
ImmRegMem::Byte(v) => {
writeln!(func.current_branch(), "mov {}, {v}", dst.into_byte())?;
}
ImmRegMem::Word(v) => {
writeln!(func.current_branch(), "mov {}, {v}", dst.into_word())?;
}
ImmRegMem::DWord(v) => {
writeln!(func.current_branch(), "mov {}, {v}", dst.into_dword())?;
}
ImmRegMem::QWord(v) => {
writeln!(func.current_branch(), "mov {}, {v}", dst.into_qword())?;
}
ImmRegMem::Mem(mem) => {
writeln!(
func.current_branch(),
"mov {}, {mem}",
lhs_ty.register_width(dst)
)?;
}
ImmRegMem::Reg(reg) => {
writeln!(
func.current_branch(),
"mov {}, {reg}",
lhs_ty.register_width(dst)
)?;
}
_ => unreachable!(),
}
}
}
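                // Cmp only sets the flags; the IsEq/IsNeq/... nodes below turn them into
                // a boolean with the matching setcc instruction.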
Inst::Cmp(ty) => {
let (lhs, rhs) = data.as_binary();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
let rhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, rhs);
if ty.is_floating() {
writeln!(func.current_branch(), "comiss {lhs}, {rhs}")?;
} else {
writeln!(func.current_branch(), "cmp {lhs}, {rhs}")?;
}
}
Inst::IsEq(signed)
| Inst::IsNeq(signed)
| Inst::IsGt(signed)
| Inst::IsLt(signed)
| Inst::IsGe(signed)
| Inst::IsLe(signed) => {
let dst = liveness.get_register(node.into()).unwrap().into_byte();
#[cfg_attr(rustfmt, rustfmt::skip)]
let mnemonic = match inst {
Inst::IsEq(_) => "sete",
Inst::IsNeq(_) => "setne",
Inst::IsGt(_) => if signed {"setg"} else {"seta"},
Inst::IsLt(_) => if signed {"setl"} else {"setb"},
Inst::IsGe(_) => if signed {"setge"} else {"setae"},
Inst::IsLe(_) => if signed {"setle"} else {"setbe"},
_ => unreachable!(),
};
writeln!(func.current_branch(), "{mnemonic} {dst}")?;
}
Inst::IsZero(ty) => {
let dst = ty.register_width(liveness.get_register(node.into()).unwrap());
let lhs = data.as_node();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
writeln!(func.current_branch(), "mov {dst}, {lhs}")?;
writeln!(func.current_branch(), "test {dst}, {dst}")?;
writeln!(func.current_branch(), "setz {}", dst.into_byte())?;
}
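                // ReturnValue: move the value into the return register (xmm0 for floats,
                // rax otherwise) and jump to the shared epilogue.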
Inst::ReturnValue(ty) => {
let lhs = data.as_node();
let lhs = self.node_as_operand(&liveness, &mapping, &mut func, strings, lhs);
if ty.is_floating() {
if !lhs.occupy_same_register(Register::xmm0) {
writeln!(func.current_branch(), "movss xmm0, {lhs}",)?;
}
} else {
if !lhs.occupy_same_register(Register::rax) {
writeln!(
func.current_branch(),
"mov {}, {lhs}",
Register::rax.into_bytesize(lhs.byte_width())
)?;
}
}
writeln!(func.current_branch(), "jmp {name}__epilogue")?;
}
Inst::Return => {
writeln!(func.current_branch(), "jmp {name}__epilogue")?;
}
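                // Jump: nothing is emitted when the target is simply the next node
                // (fall-through).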
Inst::Jump => {
let lhs = data.as_node();
if lhs != node + 1 {
writeln!(func.current_branch(), "jmp {name}__L{lhs}")?;
}
}
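                // Branch: test the condition and jump; whichever target is the
                // fall-through block gets no explicit jmp.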
Inst::Branch(condition) => {
let cond =
self.node_as_operand(&liveness, &mapping, &mut func, strings, condition);
let (lhs, rhs) = data.as_binary();
writeln!(func.current_branch(), "test {cond}, {cond}")?;
match (lhs, rhs) {
_ if lhs == node + 1 => {
writeln!(func.current_branch(), "jz {name}__L{rhs}")?;
}
_ if rhs == node + 1 => {
writeln!(func.current_branch(), "jnz {name}__L{lhs}")?;
}
_ => {
writeln!(func.current_branch(), "jnz {name}__L{lhs}")?;
writeln!(func.current_branch(), "jz {name}__L{rhs}")?;
}
}
}
Inst::Phi2(ty) => {
// noop, need to ensure that input nodes are merged within their branch
}
}
}
let mut buf = String::new();
func.finish(&mut buf, strings)?;
Ok(buf)
}
}
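
/// Borrows a `Mir` together with the `StringTable` needed to resolve interned
/// names while rendering it via `Display`.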
pub struct DisplayMir<'a, 'b> {
mir: &'a Mir,
strings: &'b StringTable,
}
impl<'a, 'b> core::fmt::Display for DisplayMir<'a, 'b> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.mir.render(f, &self.strings)
}
}