& kill_cti_in_e
;
// assumes can_fold = 0
// sel_ll_gen, can_fold=0 flavor: on when a CTI in E is being killed
// while q_state[0] is clear and nothing was loaded last cycle, and
// neither the fold-alt-address (_cf0 flavor) nor the inc-alttag
// select is active.
wire sel_ll_gen_cf0
= ~sadr_fold_aa_cf0 & ~sel_inc_alttag
& ~q_state[0] & ~loaded_lastcyc
& kill_cti_in_e
;
// resolved select; driven by the cmux2 expansion below, which picks
// between the _cf0/_cf1 flavors with can_fold
wire sel_ll_gen
;
// Expanded macro begin.
// cmux2(sel_ll_gen_mux, 1, sel_ll_gen, sel_ll_gen_cf0, sel_ll_gen_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (the _cf0 flavor), select=1 -> in1_fn
// (the _cf1 flavor).
function [1:1] sel_ll_gen_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// x-propagation when select is x/z in simulation. was 65'hx: a
// width mismatch against the 1-bit out_fn (silently truncated,
// flagged by lint) - use a correctly sized 1'bx.
default: out_fn = 1'bx;
endcase
sel_ll_gen_mux = out_fn ;
end
endfunction
assign sel_ll_gen = sel_ll_gen_mux( sel_ll_gen_cf0, sel_ll_gen_cf1, can_fold) ;
// Expanded macro end.
/*
wire sel_ll_gen = ~sadr_fold_aa & ~sel_inc_alttag
& ~q_state[0] & ~loaded_lastcyc
& kill_cti_in_e
;
*/
// assumes can_fold = 1
// sel_inc_last_gen, can_fold=1 flavor: select the incremented
// last-generated address when (stuff_q or fetch_ic) is active with
// no CTI kill in E, or when predict_taken is on without a multiple
// fold in D.
wire sel_inc_last_gen_cf1
= ~sadr_fold_aa_cf1 & ~sel_inc_alttag
& (
~kill_cti_in_e & (stuff_q_cf1 | fetch_ic_cf1)
| predict_taken & ~multiple_fold_d
)
;
// assumes can_fold = 0
// identical term using the can_fold=0 flavors of the inputs
wire sel_inc_last_gen_cf0
= ~sadr_fold_aa_cf0 & ~sel_inc_alttag
& (
~kill_cti_in_e & (stuff_q_cf0 | fetch_ic_cf0)
| predict_taken & ~multiple_fold_d
)
;
// resolved select, muxed by can_fold below
wire sel_inc_last_gen
;
// Expanded macro begin.
// cmux2(sel_inc_last_gen_mux, 1, sel_inc_last_gen, sel_inc_last_gen_cf0, sel_inc_last_gen_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (_cf0 flavor), select=1 -> in1_fn
// (_cf1 flavor).
function [1:1] sel_inc_last_gen_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// was 65'hx: width mismatch against the 1-bit out_fn - use 1'bx
default: out_fn = 1'bx;
endcase
sel_inc_last_gen_mux = out_fn ;
end
endfunction
assign sel_inc_last_gen = sel_inc_last_gen_mux( sel_inc_last_gen_cf0, sel_inc_last_gen_cf1, can_fold) ;
// Expanded macro end.
/*
wire sel_inc_last_gen = ~sadr_fold_aa & ~sel_inc_alttag
& (
~kill_cti_in_e & (stuff_q | fetch_ic)
| predict_taken & ~multiple_fold_d
)
;
*/
// assumes can_fold = 1
// sel_inc_ll_gen, can_fold=1 flavor: on for a killed CTI in E when
// q_state[0] is clear but something WAS loaded last cycle (contrast
// sel_ll_gen above, which requires ~loaded_lastcyc).
wire sel_inc_ll_gen_cf1
= ~sadr_fold_aa_cf1 & ~sel_inc_alttag
& kill_cti_in_e
& ~q_state[0] & loaded_lastcyc
;
// assumes can_fold = 0
// identical term using the can_fold=0 flavor of sadr_fold_aa
wire sel_inc_ll_gen_cf0
= ~sadr_fold_aa_cf0 & ~sel_inc_alttag
& kill_cti_in_e
& ~q_state[0] & loaded_lastcyc
;
// resolved select, muxed by can_fold below
wire sel_inc_ll_gen;
// Expanded macro begin.
// cmux2(sel_inc_ll_gen_mux, 1, sel_inc_ll_gen, sel_inc_ll_gen_cf0, sel_inc_ll_gen_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (_cf0 flavor), select=1 -> in1_fn
// (_cf1 flavor).
function [1:1] sel_inc_ll_gen_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// was 65'hx: width mismatch against the 1-bit out_fn - use 1'bx
default: out_fn = 1'bx;
endcase
sel_inc_ll_gen_mux = out_fn ;
end
endfunction
assign sel_inc_ll_gen = sel_inc_ll_gen_mux( sel_inc_ll_gen_cf0, sel_inc_ll_gen_cf1, can_fold) ;
// Expanded macro end.
/*
wire sel_inc_ll_gen = ~sadr_fold_aa & ~sel_inc_alttag
& kill_cti_in_e
& ~q_state[0] & loaded_lastcyc
;
*/
// assumes can_fold = 1
// sel_inc_dpc, can_fold=1 flavor: killed CTI in E with q_state[0]
// set -> select the incremented dpc (presumably the D-stage PC).
wire sel_inc_dpc_cf1
= ~sadr_fold_aa_cf1 & ~sel_inc_alttag
& kill_cti_in_e & q_state[0]
;
// assumes can_fold = 0
// identical term using the can_fold=0 flavor of sadr_fold_aa
wire sel_inc_dpc_cf0
= ~sadr_fold_aa_cf0 & ~sel_inc_alttag
& kill_cti_in_e & q_state[0]
;
// resolved select, muxed by can_fold below
wire sel_inc_dpc;
// Expanded macro begin.
// cmux2(sel_inc_dpc_mux, 1, sel_inc_dpc, sel_inc_dpc_cf0, sel_inc_dpc_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (_cf0 flavor), select=1 -> in1_fn
// (_cf1 flavor).
function [1:1] sel_inc_dpc_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// was 65'hx: width mismatch against the 1-bit out_fn - use 1'bx
default: out_fn = 1'bx;
endcase
sel_inc_dpc_mux = out_fn ;
end
endfunction
assign sel_inc_dpc = sel_inc_dpc_mux( sel_inc_dpc_cf0, sel_inc_dpc_cf1, can_fold) ;
// Expanded macro end.
/*
wire sel_inc_dpc = ~sadr_fold_aa & ~sel_inc_alttag
& kill_cti_in_e & q_state[0]
;
*/
// replacing the sel_inc_last_gen mux select with
// this term to guarantee that at least one mux select
// is on to the DP muxes. this might even be faster.
// assumes can_fold = 1
// fallthrough select: on whenever none of the other "inc" selects
// is on (sel_inc_last_gen is deliberately NOT a term here - see
// the comment above).
wire recirc2_default_cf1
=
~sel_inc_ll_gen_cf1 & ~sel_inc_dpc_cf1 & ~sel_inc_alttag;
// assumes can_fold = 0
// identical term using the can_fold=0 flavors
wire recirc2_default_cf0
=
~sel_inc_ll_gen_cf0 & ~sel_inc_dpc_cf0 & ~sel_inc_alttag;
// resolved default select, muxed by can_fold below
wire recirc2_default;
// Expanded macro begin.
// cmux2(recirc2_default_mux, 1, recirc2_default, recirc2_default_cf0, recirc2_default_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (_cf0 flavor), select=1 -> in1_fn
// (_cf1 flavor).
function [1:1] recirc2_default_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// was 65'hx: width mismatch against the 1-bit out_fn - use 1'bx
default: out_fn = 1'bx;
endcase
recirc2_default_mux = out_fn ;
end
endfunction
assign recirc2_default = recirc2_default_mux( recirc2_default_cf0, recirc2_default_cf1, can_fold) ;
// Expanded macro end.
/*
wire recirc2_default =
~sel_inc_ll_gen & ~sel_inc_dpc & ~sel_inc_alttag;
*/
// drive the last-generated address as the instruction virtual
// address while the I$ is waiting (iwait_f) or a flush/fill
// conflict is pending, unless the zero-address select wins.
wire sel_lgen_iva =
~sadr_zero & (iwait_f | flsh_fill_conf);
// assumes can_fold = 1
// select the GPC for jmp/rett, tbr (trap base), or the
// fold-alt-address case, when neither sadr_zero nor sel_lgen_iva
// takes priority.
wire sel_gpc_cf1
= ~sadr_zero & ~sel_lgen_iva & (
sadr_jmprett | sadr_tbr // | sadr_zero
| sadr_fold_aa_cf1
)
;
// assumes can_fold = 0
// identical term using the can_fold=0 flavor of sadr_fold_aa
wire sel_gpc_cf0
= ~sadr_zero & ~sel_lgen_iva & (
sadr_jmprett | sadr_tbr // | sadr_zero
| sadr_fold_aa_cf0
)
;
// resolved select, muxed by can_fold below
wire sel_gpc;
// Expanded macro begin.
// cmux2(sel_gpc_mux, 1, sel_gpc, sel_gpc_cf0, sel_gpc_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (_cf0 flavor), select=1 -> in1_fn
// (_cf1 flavor).
function [1:1] sel_gpc_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// was 65'hx: width mismatch against the 1-bit out_fn - use 1'bx
default: out_fn = 1'bx;
endcase
sel_gpc_mux = out_fn ;
end
endfunction
assign sel_gpc = sel_gpc_mux( sel_gpc_cf0, sel_gpc_cf1, can_fold) ;
// Expanded macro end.
/*
wire sel_gpc = ~sadr_zero & ~sel_lgen_iva & (
sadr_jmprett | sadr_tbr // | sadr_zero
| sadr_fold_aa
)
;
*/
// assumes can_fold = 1
// recirculate the (non-incremented) address: last_gen or ll_gen
// select active, and no higher-priority select (gpc, zero,
// lgen_iva) wins.
wire sel_recirc_cf1
=
~sel_gpc_cf1 & ~sadr_zero
& ~sel_lgen_iva & (
(sel_last_gen_cf1 | sel_ll_gen_cf1));
// assumes can_fold = 0
// identical term using the can_fold=0 flavors
wire sel_recirc_cf0
=
~sel_gpc_cf0 & ~sadr_zero
& ~sel_lgen_iva & (
(sel_last_gen_cf0 | sel_ll_gen_cf0));
// resolved select, muxed by can_fold below
wire sel_recirc;
// Expanded macro begin.
// cmux2(sel_recirc_mux, 1, sel_recirc, sel_recirc_cf0, sel_recirc_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (_cf0 flavor), select=1 -> in1_fn
// (_cf1 flavor).
function [1:1] sel_recirc_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// was 65'hx: width mismatch against the 1-bit out_fn - use 1'bx
default: out_fn = 1'bx;
endcase
sel_recirc_mux = out_fn ;
end
endfunction
assign sel_recirc = sel_recirc_mux( sel_recirc_cf0, sel_recirc_cf1, can_fold) ;
// Expanded macro end.
/*
wire sel_recirc =
~sel_gpc & ~sadr_zero
& ~sel_lgen_iva & (
(sel_last_gen | sel_ll_gen));
*/
// assumes can_fold = 1
// recirculate the incremented address: any of the "inc" selects
// active, and no higher-priority select (gpc, zero, lgen_iva) wins.
wire sel_recirc_inc_cf1
=
~sel_gpc_cf1 & ~sadr_zero & ~sel_lgen_iva & (
sel_inc_last_gen_cf1 | sel_inc_ll_gen_cf1
| sel_inc_dpc_cf1 | sel_inc_alttag);
// assumes can_fold = 0
// identical term using the can_fold=0 flavors
wire sel_recirc_inc_cf0
=
~sel_gpc_cf0 & ~sadr_zero & ~sel_lgen_iva & (
sel_inc_last_gen_cf0 | sel_inc_ll_gen_cf0
| sel_inc_dpc_cf0 | sel_inc_alttag);
// resolved select, muxed by can_fold below
wire sel_recirc_inc;
// Expanded macro begin.
// cmux2(sel_recirc_inc_mux, 1, sel_recirc_inc, sel_recirc_inc_cf0, sel_recirc_inc_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (_cf0 flavor), select=1 -> in1_fn
// (_cf1 flavor).
function [1:1] sel_recirc_inc_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// was 65'hx: width mismatch against the 1-bit out_fn - use 1'bx
default: out_fn = 1'bx;
endcase
sel_recirc_inc_mux = out_fn ;
end
endfunction
assign sel_recirc_inc = sel_recirc_inc_mux( sel_recirc_inc_cf0, sel_recirc_inc_cf1, can_fold) ;
// Expanded macro end.
/*
wire sel_recirc_inc =
~sel_gpc & ~sadr_zero & ~sel_lgen_iva & (
sel_inc_last_gen | sel_inc_ll_gen
| sel_inc_dpc | sel_inc_alttag);
*/
// wire sel_car_iva = hold & ~sadr_zero;
/*
* generate mux selects for icache_adr
*/
// this stuff is used to select the fill pointer onto the
// icache_adr. this takes precedence over forcing
// anything else out. most cases are driven by the MMU.
// one case is driven by a receipt of a parity error
// during the fill of the I$. the I$ controller will
// send the signal start_itag_inv to the IU which will
// recirculate the fill address the next cycle.
// mm_istat_avail registered by one ss_clock cycle
wire p_mm_istat_avail
;
Mflipflop_1 p_mm_istat_avail_reg_1(p_mm_istat_avail, mm_istat_avail,ss_clock,1'b0) ;
// the I$ controller's force-fill request for this cycle
wire ic_fill_strobe
= ic_force_ifill_g;
// start_itag_inv registered: recirculate the fill address the cycle
// after an I$ fill parity error (see comment above)
wire force_perr
;
Mflipflop_1 force_perr_reg_1(force_perr,start_itag_inv,ss_clock,1'b0) ;
// force the fill pointer onto icache_adr (suppressed by reset)
assign force_ifill =
~reset & (ic_fill_strobe | p_mm_istat_avail | force_perr);
// this stuff is used to select the alu_out for the icache_adr
// (equivalent to putting dva out on icache_adr bus) for
// FLUSH and flush ASI operations and for STA ops to
// the I$ SRAMs.
// REGWIRE p_ista_enbl;
// REG(p_ista_enbl_reg,1,p_ista_enbl,ista_enbl,ss_clock,1'b0)
// force the DVA out onto the icache_address bus.
// in theory, it's supposed to work like this
// IFLUSH D E w W R same for STA flush ic
// HIFLUSH D e E W R
// force_dva == ==
// dwait_w ==
//
// STA D E w W R (for IC related ASI's
// HSTA D e E W R but not flush ASI's)
// force_dva ==
// dwait_w ==
//
// LDA D E W R (for IC related ASI's)
// HLDA D E W R
// force_dva ==
//
// the dsbl_force_dva term is used to ensure that there
// is only 1 force_dva per pipe stage. (ie: in the E
// cycle of an IFLUSH, if the pipe is held there, the
// IU will only generate 1 force_dva)
//
// NOTE - this interface has been updated. it is
// unchanged for FLUSH operations, but for lda/sta
// of the I$ tag/ram, the D$ controller will assert
// a signal called i_dva_req. the IU will register
// this signal and use it to force dva. it need not
// be qualified by ~force_ifill since the MMU is
// guaranteed to be idle
// flush-ASI decode in E (iu_asi_e_4 is presumably the flush-ASI
// decode bit - confirm), annulled by folding
assign flush_asi_e = ~fold_annul & iu_asi_e_4;
// wire ic_asi_e = ~fold_annul
// & (iu_asi_e==5'b01101 | iu_asi_e==5'b01100);
// REGWIRE ic_asi_w;
// REG(ic_asi_w_reg,1,ic_asi_w,ic_asi_e,ss_clock,hold)
// flush_asi_e staged to W (register enabled by ~hold)
wire flush_asi_w
;
Mflipflop_1 flush_asi_w_reg_1(flush_asi_w,flush_asi_e,ss_clock,hold) ;
// one-force_dva-per-pipe-stage interlock (see note above): while
// hold is on, remember that force_dva already fired so it is not
// re-issued in the held stage.
wire dsbl_force_dva
;
wire ndsbl_force_dva
= hold & (force_dva | dsbl_force_dva);
Mflipflop_1 dsbl_fdva_reg_1(dsbl_force_dva,ndsbl_force_dva,ss_clock,1'b0) ;
// i_dva_req from the D$ controller, registered; forces dva for
// lda/sta of the I$ tag/ram (see interface NOTE above)
wire icasi_force_dva
;
Mflipflop_1 icasi_force_dva_reg_1(icasi_force_dva,i_dva_req,ss_clock,1'b0) ;
// drive the DVA onto icache_adr: IFLUSH or flush-ASI STA in E, or
// the same in W while dwait_w_for_flush holds, all gated off by
// force_ifill / reset / the one-shot disable; plus the registered
// i_dva_req path (only reset-qualified - see NOTE above).
assign force_dva = ~force_ifill & ~reset & ~dsbl_force_dva & (
~fold_annul & e_hop3==`IFLUSH
| flush_asi_e & e_hop3==`STA // flush_asi_e/ic_asi_e
// | e_hop3==LDA & ic_asi_e // have fold_annul term
// | e_hop3==STA & ic_asi_e // in it already
| w_hop3==`IFLUSH & dwait_w_for_flush
| flush_asi_w & w_hop3==`STA & dwait_w_for_flush
// | ic_asi_w & w_hop3==STA & dwait_w
// | ic_asi_w & w_hop3==LDA & dwait_w
)
| icasi_force_dva & ~reset
;
// | p_ista_enbl
// force_dva delayed one cycle
Mflipflop_1 last_force_dva_reg_1(last_force_dva,force_dva,ss_clock,1'b0) ;
// because tag_hit to hold path is so slow, we cannot
// correctly put out the icache_adr for the miss. so,
// we put the correct address out 1 cycle later via
// sel_lgen_ica. this turns on after a tag miss
// until iwait_f makes a 1->0 transition.
//
// above paragraph is old. don't need extend tag miss
// in this determination anymore. cache controller
// generates iwait_f better now.
// icache_adr version of sel_lgen_iva, additionally gated by the
// force_dva / force_ifill overrides
wire sel_lgen_ica = ~sadr_zero & ~force_dva & ~force_ifill
& (iwait_f | flsh_fill_conf) // | extend_tag_miss)
;
// duplicates of sel_gpc, sel_recirc, and sel_recirc_inc
// for icache_adr - need force_dva and nforce_ifill in
// these versions.
// common enable for the icache_adr selects below: on only when
// none of the zero / dva / ifill / lgen overrides is active
wire dsbl_stuff
=
~sadr_zero & ~force_dva
& ~force_ifill & ~sel_lgen_ica;
// assumes can_fold = 1
// icache_adr version of sel_gpc: same term, but qualified by
// dsbl_stuff (which folds in the force_dva/force_ifill overrides)
wire sel_gpc_ic_cf1
= dsbl_stuff & (
sadr_jmprett | sadr_tbr // | sadr_zero
| sadr_fold_aa_cf1
)
;
// assumes can_fold = 0
// identical term using the can_fold=0 flavor of sadr_fold_aa
wire sel_gpc_ic_cf0
= dsbl_stuff & (
sadr_jmprett | sadr_tbr // | sadr_zero
| sadr_fold_aa_cf0
)
;
// resolved select, muxed by can_fold below
wire sel_gpc_ic;
// Expanded macro begin.
// cmux2(sel_gpc_ic_mux, 1, sel_gpc_ic, sel_gpc_ic_cf0, sel_gpc_ic_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (_cf0 flavor), select=1 -> in1_fn
// (_cf1 flavor).
function [1:1] sel_gpc_ic_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// was 65'hx: width mismatch against the 1-bit out_fn - use 1'bx
default: out_fn = 1'bx;
endcase
sel_gpc_ic_mux = out_fn ;
end
endfunction
assign sel_gpc_ic = sel_gpc_ic_mux( sel_gpc_ic_cf0, sel_gpc_ic_cf1, can_fold) ;
// Expanded macro end.
/*
wire sel_gpc_ic = dsbl_stuff & (
sadr_jmprett | sadr_tbr // | sadr_zero
| sadr_fold_aa
)
;
*/
// assumes can_fold = 1
// icache_adr version of sel_recirc, qualified by dsbl_stuff
wire sel_recirc_ic_cf1
= ~sel_gpc_ic_cf1 & dsbl_stuff
& (sel_last_gen_cf1 | sel_ll_gen_cf1);
// assumes can_fold = 0
// identical term using the can_fold=0 flavors
wire sel_recirc_ic_cf0
= ~sel_gpc_ic_cf0 & dsbl_stuff
& (sel_last_gen_cf0 | sel_ll_gen_cf0);
// resolved select, muxed by can_fold below
wire sel_recirc_ic;
// Expanded macro begin.
// cmux2(sel_recirc_ic_mux, 1, sel_recirc_ic, sel_recirc_ic_cf0, sel_recirc_ic_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (_cf0 flavor), select=1 -> in1_fn
// (_cf1 flavor).
function [1:1] sel_recirc_ic_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// was 65'hx: width mismatch against the 1-bit out_fn - use 1'bx
default: out_fn = 1'bx;
endcase
sel_recirc_ic_mux = out_fn ;
end
endfunction
assign sel_recirc_ic = sel_recirc_ic_mux( sel_recirc_ic_cf0, sel_recirc_ic_cf1, can_fold) ;
// Expanded macro end.
/*
wire sel_recirc_ic = ~sel_gpc_ic & dsbl_stuff
& (sel_last_gen | sel_ll_gen);
*/
// assumes can_fold = 1
// icache_adr version of sel_recirc_inc, qualified by dsbl_stuff
wire sel_recirc_inc_ic_cf1
= ~sel_gpc_ic_cf1 & dsbl_stuff
& (sel_inc_last_gen_cf1 | sel_inc_ll_gen_cf1
| sel_inc_dpc_cf1 | sel_inc_alttag);
// assumes can_fold = 0
// identical term using the can_fold=0 flavors
wire sel_recirc_inc_ic_cf0
= ~sel_gpc_ic_cf0 & dsbl_stuff
& (sel_inc_last_gen_cf0 | sel_inc_ll_gen_cf0
| sel_inc_dpc_cf0 | sel_inc_alttag);
// resolved select, muxed by can_fold below
wire sel_recirc_inc_ic;
// Expanded macro begin.
// cmux2(sel_recirc_inc_ic_mux, 1, sel_recirc_inc_ic, sel_recirc_inc_ic_cf0, sel_recirc_inc_ic_cf1, can_fold)
// 2:1 mux: select=0 -> in0_fn (_cf0 flavor), select=1 -> in1_fn
// (_cf1 flavor).
function [1:1] sel_recirc_inc_ic_mux ;
input [1:1] in0_fn ;
input [1:1] in1_fn ;
input select_fn ;
reg [1:1] out_fn ;
begin
case (select_fn) /* synopsys parallel_case */
1'b0: out_fn = in0_fn ;
1'b1: out_fn = in1_fn ;
// was 65'hx: width mismatch against the 1-bit out_fn - use 1'bx
default: out_fn = 1'bx;
endcase
sel_recirc_inc_ic_mux = out_fn ;
end
endfunction
assign sel_recirc_inc_ic = sel_recirc_inc_ic_mux( sel_recirc_inc_ic_cf0, sel_recirc_inc_ic_cf1, can_fold) ;
// Expanded macro end.
/*
wire sel_recirc_inc_ic = ~sel_gpc_ic & dsbl_stuff
& (sel_inc_last_gen | sel_inc_ll_gen
| sel_inc_dpc | sel_inc_alttag);
*/
// icache_adr version of the zero-address select, gated off by the
// dva/ifill overrides
wire sadr_zero_ic = sadr_zero & ~force_dva & ~force_ifill;
// this hold is used to keep the car/mar combo clean
// in the event a bicc/bfcc/call is in D and gets interlocked.
// if we did not turn this hold on, we would have problems
// restarting the GPC addr selection correctly since car and
// mar would both have the targ addr. we would like to have
// the addr prior to the targ addr in mar if possible
// for sel_inc_ll_gen or sel_ll_gen.
// REGWIRE phold_noic;
// REG(phold_noic_reg,1,phold_noic,hold_noic,ss_clock,1'b0)
// NOTE(review): BICC/BFCC decode uses d_hop2 but CALL uses d_hop -
// presumably they live in different opcode fields; confirm.
assign hld_car_mar = ~reset & (
ilock & valid_decode_nilock & ~untaken_empty_ilock
& (d_hop2==`BICC | d_hop2==`BFCC | d_hop==`CALL)
);
// same term as hld_car_mar, kept as a separate wire (currently
// byte-for-byte identical logic)
wire hld_lgens = ~reset & (
ilock & valid_decode_nilock & ~untaken_empty_ilock
& (d_hop2==`BICC | d_hop2==`BFCC | d_hop==`CALL)
)
;
// this hold is used to hold the loaded_lastcyc register.
// see hld_car_mar for particulars. it is necessary to keep
// the loaded_lastcyc information sync'ed with the car/mar
// for GPC generation.
wire hld_llc_almost
;
Mflipflop_1 hld_llc_reg_1(hld_llc_almost,hld_car_mar,ss_clock,hold) ;
assign hld_llc = ~reset & (hld_llc_almost | hold);
// use of load_q in here is puzzling
// next sequential-fetch state: set unless a CTI is in D (no-fold
// flavor); registered into sequential_f below
wire nsequential_f
= (load_q | use_f_nold) & ~cti_in_d_nof;
Mflipflop_1 seq_f_reg_1(sequential_f,nsequential_f,ss_clock,hold) ;
endmodule
// this signal is used to determine if the address
// going out this cycle is a new address. this is
// used to tell the I$ controller that a new address
// is ready. the pipelined version new_iadr_f is
// used to indicate that adr in F is a new fetch.
// use it to determing start_imhold_f.
// wire new_iadr_g = ~reset & ~(sel_ll_gen & ~sel_gpc);
// This page created: Thu Aug 19 11:59:49 1999
// From: ../../../sparc_v8/ssparc/iu/Mdecode/rtl/pc_cntl.v