return out;
}
+/* Component-wise bitwise OR of two vectors: each float component is
+ * truncated to an integer (qcint_t), OR'd, and stored back as a
+ * qcfloat_t. Used by the constant folder for vector | vector. */
+static GMQCC_INLINE vec3_t vec3_or(vec3_t a, vec3_t b) {
+ vec3_t out;
+ out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b.x));
+ out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b.y));
+ out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b.z));
+ return out;
+}
+
+/* Bitwise OR of a scalar float into every component of a vector
+ * (vector | float), with the same truncate-to-int semantics as
+ * vec3_or. Used by the constant folder. */
+static GMQCC_INLINE vec3_t vec3_orvf(vec3_t a, qcfloat_t b) {
+ vec3_t out;
+ out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b));
+ out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b));
+ out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b));
+ return out;
+}
+
+/* Component-wise bitwise AND of two vectors: each float component is
+ * truncated to an integer (qcint_t), AND'd, and stored back as a
+ * qcfloat_t. Used by the constant folder for vector & vector. */
+static GMQCC_INLINE vec3_t vec3_and(vec3_t a, vec3_t b) {
+ vec3_t out;
+ out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b.x));
+ out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b.y));
+ out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b.z));
+ return out;
+}
+
+/* Bitwise AND of a scalar float against every component of a vector
+ * (vector & float), with the same truncate-to-int semantics as
+ * vec3_and. Used by the constant folder. */
+static GMQCC_INLINE vec3_t vec3_andvf(vec3_t a, qcfloat_t b) {
+ vec3_t out;
+ out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b));
+ out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b));
+ out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b));
+ return out;
+}
+
/* Component-wise bitwise XOR of two vectors: each float component is
 * truncated to an integer (qcint_t), XOR'd, and stored back as a
 * qcfloat_t — mirroring vec3_or/vec3_and above.
 * Fix: the visible body only computed out.x and fell off the end of a
 * non-void function (undefined behavior; y and z were returned
 * uninitialized). The y/z components and the return are now present. */
static GMQCC_INLINE vec3_t vec3_xor(vec3_t a, vec3_t b) {
    vec3_t out;
    out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b.x));
    out.y = (qcfloat_t)(((qcint_t)a.y) ^ ((qcint_t)b.y));
    out.z = (qcfloat_t)(((qcint_t)a.z) ^ ((qcint_t)b.z));
    return out;
}
/* Constant-fold a bitwise-OR expression.
 * Supported operand combinations: float|float (folds to a float),
 * vector|vector and vector|float (fold component-wise to a vector via
 * vec3_or / vec3_orvf). Returns NULL when the operands are not both
 * foldable constants (fold_can_2 fails) so the caller emits a real
 * instruction instead. */
static GMQCC_INLINE ast_expression *fold_op_bor(fold_t *fold, ast_value *a, ast_value *b) {
- if (fold_can_2(a, b))
- return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))));
+ if (isfloat(a)) {
+ /* float | float — the parser guarantees b is a float here */
+ if (fold_can_2(a, b))
+ return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))));
+ } else {
+ /* 'a' is a vector; choose the helper by b's type */
+ if (isvector(b)) {
+ if (fold_can_2(a, b))
+ return fold_constgen_vector(fold, vec3_or(fold_immvalue_vector(a), fold_immvalue_vector(b)));
+ } else {
+ if (fold_can_2(a, b))
+ return fold_constgen_vector(fold, vec3_orvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
+ }
+ }
 return NULL;
}
/* Constant-fold a bitwise-AND expression.
 * Supported operand combinations: float&float (folds to a float),
 * vector&vector and vector&float (fold component-wise to a vector via
 * vec3_and / vec3_andvf). Returns NULL when the operands are not both
 * foldable constants (fold_can_2 fails) so the caller emits a real
 * instruction instead. */
static GMQCC_INLINE ast_expression *fold_op_band(fold_t *fold, ast_value *a, ast_value *b) {
- if (fold_can_2(a, b))
- return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))));
+ if (isfloat(a)) {
+ /* float & float — the parser guarantees b is a float here */
+ if (fold_can_2(a, b))
+ return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))));
+ } else {
+ /* 'a' is a vector; choose the helper by b's type */
+ if (isvector(b)) {
+ if (fold_can_2(a, b))
+ return fold_constgen_vector(fold, vec3_and(fold_immvalue_vector(a), fold_immvalue_vector(b)));
+ } else {
+ if (fold_can_2(a, b))
+ return fold_constgen_vector(fold, vec3_andvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
+ }
+ }
 return NULL;
}
* Creating this causes IR blocks to be marked as 'final'.
* No-Return-Call
*/
- VINSTR_NRCALL
+ VINSTR_NRCALL,
+ /* Emulated instructions. */
+ VINSTR_BITAND_VV,
+ VINSTR_BITOR_VV,
+ VINSTR_BITAND_VF,
+ VINSTR_BITOR_VF
};
/* TODO: elide */
case INSTR_SUB_V:
case INSTR_MUL_VF:
case INSTR_MUL_FV:
+ case VINSTR_BITAND_VV:
+ case VINSTR_BITOR_VV:
+ case VINSTR_BITAND_VF:
+ case VINSTR_BITOR_VF:
#if 0
case INSTR_DIV_VF:
case INSTR_MUL_IV:
}
}
+ /* TODO(divVerent) what does this do? */
if (instr->opcode == INSTR_MUL_VF)
{
value = instr->_ops[2];
return true;
}
+ if (instr->opcode == VINSTR_BITAND_VV) {
+ stmt.opcode = INSTR_BITAND;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context.line);
+ ++stmt.o1.s1;
+ ++stmt.o2.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context.line);
+ ++stmt.o1.s1;
+ ++stmt.o2.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context.line);
+
+ /* instruction generated */
+ return true;
+ }
+
+ if (instr->opcode == VINSTR_BITOR_VV) {
+ stmt.opcode = INSTR_BITOR;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context.line);
+ ++stmt.o1.s1;
+ ++stmt.o2.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context.line);
+ ++stmt.o1.s1;
+ ++stmt.o2.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context.line);
+
+ /* instruction generated */
+ return true;
+ }
+
+ if (instr->opcode == VINSTR_BITAND_VF) {
+ stmt.opcode = INSTR_BITAND;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context.line);
+ ++stmt.o1.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context.line);
+ ++stmt.o1.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context.line);
+
+ /* instruction generated */
+ return true;
+ }
+
+ if (instr->opcode == VINSTR_BITOR_VF) {
+ stmt.opcode = INSTR_BITOR;
+ stmt.o1.s1 = ir_value_code_addr(instr->_ops[1]);
+ stmt.o2.s1 = ir_value_code_addr(instr->_ops[2]);
+ stmt.o3.s1 = ir_value_code_addr(instr->_ops[0]);
+ code_push_statement(code, &stmt, instr->context.line);
+ ++stmt.o1.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context.line);
+ ++stmt.o1.s1;
+ ++stmt.o3.s1;
+ code_push_statement(code, &stmt, instr->context.line);
+
+ /* instruction generated */
+ return true;
+ }
+
+ if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
+ {
+ stmt.o1.u1 = stmt.o3.u1;
+ stmt.o3.u1 = 0;
+ }
+
if (instr->opcode == VINSTR_COND) {
ontrue = instr->bops[0];
onfalse = instr->bops[1];
if (op < VINSTR_END)
return util_instr_str[op];
switch (op) {
- case VINSTR_END: return "END";
- case VINSTR_PHI: return "PHI";
- case VINSTR_JUMP: return "JUMP";
- case VINSTR_COND: return "COND";
- default: return "<UNK>";
+ case VINSTR_END: return "END";
+ case VINSTR_PHI: return "PHI";
+ case VINSTR_JUMP: return "JUMP";
+ case VINSTR_COND: return "COND";
+ case VINSTR_BITAND_VV: return "BITAND_VV";
+ case VINSTR_BITOR_VV: return "BITOR_VV";
+ case VINSTR_BITAND_VF: return "BITAND_VF";
+ case VINSTR_BITOR_VF: return "BITOR_VF";
+ default: return "<UNK>";
}
}
case opid1('|'):
case opid1('&'):
- if (NotSameType(TYPE_FLOAT)) {
+ if ( !(exprs[0]->vtype == TYPE_FLOAT && exprs[1]->vtype == TYPE_FLOAT) &&
+ !(exprs[0]->vtype == TYPE_VECTOR && exprs[1]->vtype == TYPE_FLOAT) &&
+ !(exprs[0]->vtype == TYPE_VECTOR && exprs[1]->vtype == TYPE_VECTOR))
+ {
compile_error(ctx, "invalid types used in expression: cannot perform bit operations between types %s and %s",
type_name[exprs[0]->vtype],
type_name[exprs[1]->vtype]);
return false;
}
- if (!(out = fold_op(parser->fold, op, exprs)))
- out = (ast_expression*)ast_binary_new(ctx,
- (op->id == opid1('|') ? INSTR_BITOR : INSTR_BITAND),
- exprs[0], exprs[1]);
+
+ if (!(out = fold_op(parser->fold, op, exprs))) {
+ /*
+ * If the first operand is a float, the second must be a float
+ * too: mixing a scalar with a vector is only permitted when the
+ * vector is the first operand (scalar op vector is not allowed).
+ */
+ if (exprs[0]->vtype == TYPE_FLOAT) {
+ out = (ast_expression*)ast_binary_new(ctx,
+ (op->id == opid1('|') ? INSTR_BITOR : INSTR_BITAND),
+ exprs[0], exprs[1]);
+ } else {
+ /*
+ * The first operand is a vector: a vector may be combined with
+ * either another vector or a scalar, so branch on the type of
+ * the second operand.
+ */
+ if (exprs[1]->vtype == TYPE_VECTOR) {
+ /*
+ * Apply the bitwise operation (| or &) component-wise
+ * between the two vectors.
+ */
+ out = (ast_expression*)ast_binary_new(ctx,
+ (op->id == opid1('|') ? VINSTR_BITOR_VV : VINSTR_BITAND_VV),
+ exprs[0], exprs[1]);
+ } else {
+ out = (ast_expression*)ast_binary_new(ctx,
+ (op->id == opid1('|') ? VINSTR_BITOR_VF : VINSTR_BITAND_VF),
+ exprs[0], exprs[1]);
+ }
+ }
+ }
break;
case opid1('^'):
/*