% ttt1.pl ---- memory based choice : a modeling of TIC-TAC-TOE game
% By Kenryo INDO (Kanto Gakuen University)
% 17-19 Aug 2003. (ttt0.pl)
% modified: 24 Aug 2003. add select_agent/0 in auto_start.
% modified: 3,4 Sep 2003. fix some bugs.
% In short, the program consists of the following predicates.
% main(dialog): auto_start/0.
% simulation engine: game_play/4.
% agent behavior rules: strategy/5.
%------------------------------
% model of the game board
%------------------------------
game(start).
game(g0).
game(g1).
game(g2).
game(play(G,step(T))):-game(G),number(T).
game(win(P,WinPattern)):-player(P),win_pattern(P,WinPattern,_).
player(o).
player(x).
opponent_player(Q,P):-
player(P),
player(Q),
P \= Q.
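% e.g. ?- opponent_player(Q,o).
% Q = x.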
:- dynamic board/3.
row(1).
row(2).
row(3).
column(a).
column(b).
column(c).
board(_,columns,[a,b,c]).
board(start,row(R),[_,_,_]):-row(R).
%board(g0,columns,[a,b,c]).
board(g0,row(1),[o,x,o]).
board(g0,row(2),[x,o,o]).
board(g0,row(3),[x,x,o]).
%board(g1,columns,[a,b,c]).
board(g1,row(1),[o,x,o]).
board(g1,row(2),[x,o,o]).
board(g1,row(3),[x,x,x]).
%board(g2,columns,[a,b,c]).
board(g2,row(1),[_,x,o]).
board(g2,row(2),[x,o,_]).
board(g2,row(3),[_,_,x]).
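% Sample queries against the example boards above:
% ?- board(g1,row(3),R).
% R = [x,x,x].
% ?- board(g2,row(1),[A,B,C]).
% B = x, C = o (A stays unbound: an empty cell).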
% possible patterns of win
%------------------------------------
board(win(P,(vertical,right)),row(1),[_,_,P]).
board(win(P,(vertical,right)),row(2),[_,_,P]).
board(win(P,(vertical,right)),row(3),[_,_,P]).
board(win(P,(vertical,middle)),row(1),[_,P,_]).
board(win(P,(vertical,middle)),row(2),[_,P,_]).
board(win(P,(vertical,middle)),row(3),[_,P,_]).
board(win(P,(vertical,left)),row(1),[P,_,_]).
board(win(P,(vertical,left)),row(2),[P,_,_]).
board(win(P,(vertical,left)),row(3),[P,_,_]).
board(win(P,(lateral,up)),row(1),[P,P,P]).
board(win(_P,(lateral,up)),row(2),[_,_,_]).
board(win(_P,(lateral,up)),row(3),[_,_,_]).
board(win(_P,(lateral,middle)),row(1),[_,_,_]).
board(win(P,(lateral,middle)),row(2),[P,P,P]).
board(win(_P,(lateral,middle)),row(3),[_,_,_]).
board(win(_P,(lateral,down)),row(1),[_,_,_]).
board(win(_P,(lateral,down)),row(2),[_,_,_]).
board(win(P,(lateral,down)),row(3),[P,P,P]).
board(win(P,(diagonal,upper_left)),row(1),[P,_,_]).
board(win(P,(diagonal,upper_left)),row(2),[_,P,_]).
board(win(P,(diagonal,upper_left)),row(3),[_,_,P]).
board(win(P,(diagonal,upper_right)),row(1),[_,_,P]).
board(win(P,(diagonal,upper_right)),row(2),[_,P,_]).
board(win(P,(diagonal,upper_right)),row(3),[P,_,_]).
% subsumed patterns of win
%---------------------------------
win_pattern(P,WinPattern,[TR1,TR2,TR3]):-
board(win(P,WinPattern),row(1),TR1),
board(win(P,WinPattern),row(2),TR2),
board(win(P,WinPattern),row(3),TR3).
in_ideal(G,P,WinPattern):-
board(G,row(1),TR1),
board(G,row(2),TR2),
board(G,row(3),TR3),
win_pattern(P,WinPattern,[TR1,TR2,TR3]).
in_ideal_1(G,P,ideal):-
game(G),
player(P),
in_ideal(G,P,_WinPattern),
!.
in_ideal_1(_,_,loss).
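% e.g. g1 realizes a completed bottom-row win for x, while g0 offers
% x nothing:
% ?- in_ideal(g1,P,W).
% P = x, W = (lateral,down).
% ?- in_ideal_1(g0,x,S).
% S = loss.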
% in_ideal_2(G,P,WP,remains(S)) has moved into the nasty agent section below.
% indexed board (spreadsheet)
%---------------------------------
indexed_board(G,index(X,Y),Z):-
game(G),
board(G,row(X),XXX),
board(G,columns,COL),
nth1(K,COL,Y),
nth1(K,XXX,Z).
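% e.g. the centre cell of g0:
% ?- indexed_board(g0,index(2,b),Z).
% Z = o.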
% summary of the game play.
%---------------------------------
played_set(G,all,PLAYS):-
game(G),
findall((R,C,P),
(indexed_board(G,index(R,C),P),\+ var(P)),
PLAYS).
played_set(G,player(P),PLAYS):-
member(P,[o,x]),
played_set(G,all,AllPLAY),
findall((R,C),
(
member((R,C,P),AllPLAY)
),
PLAYS).
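% e.g. the positions occupied by x on g2:
% ?- played_set(g2,player(x),S).
% S = [(1,b),(2,a),(3,c)].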
% display played positions
%----------------------------
pp_played_set(G,C):-
played_set(G, C, A),
forall(member(X,A),(nl,write(X))).
% display table
%----------------------------
pp_board(G):-
game(G),
clause(board(G,row(_),_),_),!,
board(G,columns,COL),
nl,
forall(
member(L,[' '|COL]),
(
tab(2),
write(L)
)
),
nl,
write(' ----------'),
forall(board(G,row(A),B),
(
nl,
forall(
member(C,[A|B]),
(
tab(2),
(var(C)->C1='_';C1=C),
write(C1)
)
)
)
).
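% e.g. ?- pp_board(g2). prints (layout approximate):
%
%      a  b  c
%   ----------
%   1  _  x  o
%   2  x  o  _
%   3  _  _  x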
% game_status/2 --- win, loss, or ...
%----------------------------------------
game_status(G,unplayed):-
\+ board(G,row(_),_),
!.
game_status(G,unplayed):-
played_set(G,all,[]).
game_status(G,playing):-
\+ game_status(G,finished(_)),
!.
game_status(G,finished(Ker)):-
played_set(G,all,[_,_,_,_,_,_,_,_,_]),
in_ideal_1(G,o,Po),
in_ideal_1(G,x,Px),
!,
case_of_game_status(Po,Px,Ker).
game_status(G,finished(win(P))):-
player(P),
played_set(G,player(P),PLAYED),
W = win(P,_),
played_set(W,player(P),IDEAL),
subset(IDEAL,PLAYED),
!.
game_status(_,abnormal).
case_of_game_status(ideal,ideal,unknown).
case_of_game_status(ideal,loss,win(o)).
case_of_game_status(loss,ideal,win(x)).
case_of_game_status(loss,loss,draw).
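% Sample queries on the stored example boards:
% ?- game_status(g1,S).
% S = finished(win(x)).
% ?- game_status(g2,S).
% S = playing.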
%-------------------------
% game simulator
%-------------------------
% move of player
%----------------------------
move(play(G,step(T)),P):- var(T),!,move_1(play(G,step(T)),P).
move(play(_,step(T)),o):- 0 is T mod 2, !.
move(play(_,step(_)),x).
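% e.g. o moves on even steps, x on odd ones:
% ?- move(play(start,step(4)),P).
% P = o.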
% static version
move_1(G,o):-
played_set(G,player(o),PLAYSo),
length(PLAYSo,Lo),
played_set(G,player(x),PLAYSx),
length(PLAYSx,Lx),
Lo =< Lx,
!.
move_1(_,x).
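% e.g. on g2 (two o stones against three x stones) o is to move:
% ?- move_1(g2,P).
% P = o.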
% feasible act -- find a legitimate game play
%---------------------------------
feasible_act(G,P,index(X,Y)):-
move(G,P),
indexed_board(G,index(X,Y),Q),
var(Q).
% dynamic version with history
feasible_act(play(G,step(T)),P,index(X,Y),PlayHistory):-
feasible_act(play(G,step(T)),P,index(X,Y)),
played_set(play(G,step(T)),all,PlayHistory),
\+ already_used_position(G,index(X,Y),PlayHistory).
already_used_position(G,index(X,Y),PlayHistory):-
\+ var(G),
game(G),
\+ var(PlayHistory),
member((X,Y,P),PlayHistory),
\+ var(P).
already_used_position(G,index(X,Y),PlayHistory):-
\+ var(G),
game(G),
var(PlayHistory),
played_set(G,all,PlayHistory),
member((X,Y,_P),PlayHistory).
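% n.b. feasible_act/3 expects a play(Game,step(T)) term, since move/2
% dispatches on it. already_used_position/3 also works on a static board:
% ?- already_used_position(g2,index(1,b),_History).
% true, since (1,b) already holds x.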
%---------------------------------
% game_play/4.
%---------------------------------
game_play(G,step(T),History,Status):-
passport_to_initialize_game(G),
!,
game(G),
played_set(G,all,History),
length(History,T),
initialize_game_board_by_play(G,step(T)),
nl,
write('<>'),
game_status(play(G,step(T)),Status),
pp_board(play(G,step(T))).
game_play(G,step(T),[(X,Y,P)|History],Status):-
%length(History,L), L < 9,
current_play_step(G,step(T0)),
game_status(play(G,step(T0)),playing),
%game_play(G,step(T0),History,_Status0),
% << call the agent and its strategy >>
move(play(G,step(T0)),P),
agent(P,AGENT),
strategy_of_agent(AGENT,RULE),
strategy(play(G,step(T0)),P,History,RULE,(X,Y)),
% << updating >>
T is T0 + 1,
update_game_board_by_play(G,step(T0->T),(X,Y,P)),
game_status(play(G,step(T)),Status),
pp_board(play(G,step(T))),
nl,tab(2),write(decision_rule=RULE),
!.
game_play(G,step(end),History,finished(W)):-
current_play_step(G,step(TF)),
game_status(play(G,step(TF)),finished(W)),
played_set(play(G,step(TF)),all,History),
nl,
write('end of game.'),
%nl,
%write('learn from the records? > '),
%read(y),
learning_by_play(G),
!,
nl,
write('the play history has been saved. (learner_data/2)'),
retractall(board(play(G,step(_)),row(_),_)).
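% Typical top-level use (auto_start/0 below drives this in a loop):
% ?- game_play(start,step(T),History,Status).
% The first call copies the empty `start' board into
% board(play(start,step(0)),...) and reports Status = unplayed; each
% subsequent call lets the agent whose turn it is place one stone.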
% preprocess with user interface
%---------------------------------
% case of no data (1).
passport_to_initialize_game(G):-
\+ clause(board(play(G,_),row(_),_),_),
!.
% case of no data (2).
passport_to_initialize_game(G):-
\+ current_play_step(G,step(_T0)),
!.
% case of playing the game.
passport_to_initialize_game(G):-
current_play_step(G,step(T0)),
game_status(play(G,step(T0)),playing),
!,
fail.
% case of an already finished game.
passport_to_initialize_game(G):-
current_play_step(G,step(T0)),
game_status(play(G,step(T0)),finished(_)),
!,
fail.
% otherwise ask user.
passport_to_initialize_game(G):-
ask_user_if_play_history_exists(G).
ask_user_if_play_history_exists(_G):-
nl,
write('Continue the previous play of this game? (y/n) > '),
read(Y),
\+ member(Y,[y,yes,'Y']),
write('Will you eliminate the records? (y/n) > '),
read(Y1),
member(Y1,[y,yes,'Y']).
% initialize board/3
%---------------------------------
:- dynamic current_play_step/2.
initialize_current_play_step(G,step(T)):-
retractall(current_play_step(G,_)),
assert(current_play_step(G,step(T))).
initialize_game_board_by_play(G,step(T)):-
retractall(board(play(G,step(_T)),row(_),_)),
initialize_current_play_step(G,step(T)),
forall(
board(G,row(R),TR),
assert(board(play(G,step(T)),row(R),TR))
).
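% e.g. load the stored g2 position as a live play at depth 5:
% ?- initialize_game_board_by_play(g2,step(5)),
%    pp_board(play(g2,step(5))).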
% update board/3
%---------------------------------
update_game_board_by_play(G,step(T0->T),(X,Y,P)):-
update_current_play_step(G,step(T0->T)),
retractall(board(play(G,step(T)),row(_),_)),
forall(
board(play(G,step(T0)),row(R),_TR),
update_each_board_by_play(G,step(T0->T),R,(X,Y,P))
).
update_each_board_by_play(G,step(T0->T),R,(X,Y,P)):-
board(play(G,step(T0)),row(R),_TR),
findall(Q,
(
indexed_board(play(G,step(T0)),index(R,C),Q),
((var(Q),R=X,C=Y)->Q=P;true)
),
TR),
A=assert(board(play(G,step(T)),row(R),TR)),
A.
%nl,write(A).
update_current_play_step(G,step(T0->T)):-
retract(current_play_step(G,step(T0))),
T is T0 + 1,
assert(current_play_step(G,step(T))).
% learning by game play
%----------------------------------------
% used by the memory based strategies
:- dynamic learner_data/2.
learning_by_play(G):-
current_play_step(G,step(N)),
game_status(play(G,step(N)),FinalState),
length(D,N),
BOARD=board(play(G,step(K)),row(_),_),
MEMORY=learner_data(BOARD,FinalState),
BOARD0=board(play(G,step(K)),row(_),_),
MEMORY0=learner_data(BOARD0,FinalState0),
DATA=(BOARD,FinalState),
DATA0=(BOARD0,FinalState0),
LEARN=assert(MEMORY),
forall(
(
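% [1|D] holds 1 at index 1 and a fresh variable at every later
% index, so nth1(K,[1|D],K) enumerates K = 1,...,N+1.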
nth1(K,[1|D],K),
BOARD,
(
MEMORY0
-> \+ DATA =@= DATA0
; true
)
),
LEARN
).
list_data(learn(G)):-
listing(
learner_data(
board(play(G,step(_)),row(_),_),
_FinalState
)
).
list_data(board(G)):-
current_play_step(G,step(N)),
length(D,N),
DATA=board(play(G,step(K)),row(_),_),
forall(
(
nth1(K,[1|D],K),
DATA
),
(
nl,write(DATA)
)
).
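% e.g. inspect the memorized records and, after at least one play,
% the current board snapshots:
% ?- list_data(learn(start)).
% ?- list_data(board(start)).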
%-------------------------------------------
% player's strategy --- agent models
%-------------------------------------------
agent(0). % a myopic win seeker.
agent(1). % memory based choice (loss averter).
agent(2). % memory based choice (win seeker).
agent(3). % user direction.
agent(4). % explorer.
agent(5). % nasty.
% n.b. menu option 0 (mywin) maps onto agent 1 below; agent 2's generic
% memory(_) lets strategy/5 try loss_avoid, win_seek, then no_match in turn.
strategy_of_agent(1,mywin(_)). % the argument is the winning board pattern the agent aims at.
strategy_of_agent(2,memory(_)). % the argument is the risk attitude used to match memory.
strategy_of_agent(3,user(_)). % the argument is the kind of direction given by the user.
strategy_of_agent(4,explorer(_)). % the argument is the board pattern the agent explores toward.
strategy_of_agent(5,nasty(_,_)).
% default assignment of an agent to each player
:- dynamic agent/2.
agent(o,2).
agent(x,2).
select_agent:-
abolish(agent/2), % n.b. drop the previous player-agent assignments.
nl,
write('----------------------'),nl,
write('Select an agent for o:'),nl,
write('----------------------'),
ask_mode_of_user_specified_choice(REASON),
strategy_of_agent(A,REASON),
assert(agent(o,A)),
nl,
write('----------------------'),nl,
write('Select the agent for x:'),nl,
write('----------------------'),
ask_mode_of_user_specified_choice(REASON1),
strategy_of_agent(B,REASON1),
assert(agent(x,B)).
% cerebral but myopic agent
%-------------------------------------------------------
% a cerebral yet far from wide-awake player who tries not to lose:
% the strategy puts its own stone on an unplayed position that
% belongs to one of its winning patterns, if possible.
strategy(G,P,PlayHistory,mywin(Pattern),(X,Y)):-
player(P),
in_ideal(G,P,Pattern),
indexed_board(win(P,Pattern),index(X,Y),Q),
Q==P,
feasible_act(G,P,index(X,Y),PlayHistory).
strategy(G,P,PlayHistory,mywin(blind),(X,Y)):-
player(P),
%\+ in_ideal(G,P,_Pattern),
feasible_act(G,P,index(X,Y),PlayHistory).
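% A sketch of the rule in isolation (assumes the g2 position is first
% loaded as a live play; step 5 is odd, so move/2 hands the turn to x):
% ?- initialize_game_board_by_play(g2,step(5)),
%    strategy(play(g2,step(5)),x,_H,mywin(Pattern),(X,Y)).
% Pattern = (vertical,left), X = 1, Y = a.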
% memory based choice
%-------------------------------------------------------
% an agent that is not wide awake but chooses using its memory of
% played games and their outcomes (wins and losses).
strategy(G,P,PlayHistory,memory(REASON),(X,Y)):-
player(P),
member(REASON,[loss_avoid(_),win_seek(_),no_match]),
feasible_act(G,P,index(X,Y),PlayHistory),
memory_based_choice(G,P,REASON,index(X,Y)).
% user specified choice
%-------------------------------------------------------
strategy(G,P,PlayHistory,user(REASON),(X,Y)):-
player(P),
user_specified_choice(G,P,REASON,index(X,Y)),
feasible_act(G,P,index(X,Y),PlayHistory).
% explorer who ought to seek unplayed choices
%-------------------------------------------------------
strategy(G,P,PlayHistory,explorer(Pattern),(X,Y)):-
player(P),
in_ideal(G,P,Pattern),
indexed_board(win(P,Pattern),index(X,Y),Q),
Q==P,
feasible_act(G,P,index(X,Y),PlayHistory),
\+ (
indexed_memory(board(G,index(X,Y),W),_),
W==P
).
strategy(G,P,PlayHistory,explorer(blind),(X,Y)):-
strategy(G,P,PlayHistory,mywin(blind),(X,Y)).
% nasty agent
%-------------------------------------------------------
strategy(G,P,PlayHistory,nasty(MyPattern,ignore),(X,Y)):-
in_ideal_2(G,P,MyPattern,remains([X,Y])),
feasible_act(G,P,index(X,Y),PlayHistory).
strategy(G,P,PlayHistory,nasty(ignore,OppPattern),(X,Y)):-
opponent_player(Q,P),
in_ideal_2(G,Q,OppPattern,remains([X,Y])),
feasible_act(G,P,index(X,Y),PlayHistory).
strategy(G,P,PlayHistory,nasty(ignore,OppPattern),(X,Y)):-
opponent_player(Q,P),
in_ideal_2(G,Q,OppPattern,remains(OppSet)),
length(OppSet,L),
\+ (
in_ideal_2(G,Q,_OppPattern1,remains(OppSet1)),
length(OppSet1,L1),
L1 < L
),
member((X,Y),OppSet),
feasible_act(G,P,index(X,Y),PlayHistory).
strategy(G,P,PlayHistory,nasty(ignore,ignore),(X,Y)):-
player(P),
feasible_act(G,P,index(X,Y),PlayHistory).
in_ideal_2(G,P,WP,remains(S)):-
in_ideal(G,P,WP),
played_set(G,player(P),PLAYED),
played_set(win(P,WP),player(P),COMPLETE),
subtract(COMPLETE,PLAYED,S).
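% e.g. the first open winning pattern for x on g2 is the left column,
% of which two cells remain to fill:
% ?- in_ideal_2(g2,x,WP,remains(S)).
% WP = (vertical,left), S = [(1,a),(3,a)].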
%--------------------------------------
% memory based choice
%--------------------------------------
memory_based_choice(Game,Player, loss_avoid(REFER),index(X,Y)):-
Game=play(_,step(T)),
T1 is T + 1,
REFER=play(_,step(T1)),
% "blowing on cold namasu": once burned, twice shy -- reject (X,Y) if
% some remembered successor of the current position shows the player's
% own stone at (X,Y) and that play ended in the opponent's win.
opponent_player(Opponent,Player),
\+ (
in_memory(Game,Player,loss(REFER)),
indexed_memory(board(REFER,index(X,Y),W),finished(win(Opponent))),
structurally_equal_to(W,Player)
).
memory_based_choice(Game,Player, win_seek(REFER),index(X,Y)):-
player(Player),
Game=play(_,step(T)),
T1 is T + 1,
REFER=play(_,step(T1)),
in_memory(Game,Player,win(REFER)),
indexed_memory(board(REFER,index(X,Y),Q),finished(win(Player))),
structurally_equal_to(Q,Player).
memory_based_choice(_G,_P,no_match,_).
structurally_equal_to(Q,P):- Q =@= P.
in_memory(G,P,win( play(G1,step(T)) )):-
player(P),
board(G,row(1),TR1),
board(G,row(2),TR2),
board(G,row(3),TR3),
learner_data(board(play(G1,step(T)),row(1),TR1),finished(win(P))),
learner_data(board(play(G1,step(T)),row(2),TR2),finished(win(P))),
learner_data(board(play(G1,step(T)),row(3),TR3),finished(win(P))).
in_memory(G,P,loss( play(G1,step(T)) )):-
opponent_player(Q,P),
in_memory(G,Q,win( play(G1,step(T)) )).
in_memory(G,P,draw( play(G1,step(T)) )):-
\+ in_memory(G,P,win( play(G1,step(T)) )),
\+ in_memory(G,P,loss( play(G1,step(T)) )).
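% A hedged sketch (assumes learner_data/2 has been populated, e.g. by
% auto_learn/0 below):
% ?- in_memory(play(start,step(2)),o,win(REFER)).
% succeeds when the rows of the current step-2 board unify with those
% of a remembered play that ended in finished(win(o)).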
% memory based analog of indexed board.
%---------------------------------
indexed_memory(board(G,index(X,Y),Z),W):-
learner_data(board(G,row(X),XXX),W),
board(G,columns,COL),
nth1(K,COL,Y),
nth1(K,XXX,Z).
% analog of played set.
%---------------------------------
played_set_in_memory(G,all,PLAYS):-
game(G),
findall((R,C,P,W),
(indexed_memory(board(G,index(R,C),P),W),\+ var(P)),
PLAYS).
played_set_in_memory(G,player(P),PLAYS):-
member(P,[o,x]),
played_set_in_memory(G,all,AllPLAY),
findall((R,C,W),
(
member((R,C,P,W),AllPLAY)
),
PLAYS).
%--------------------------------------
% user specified choice
%--------------------------------------
user_specified_choice(G,P,REASON,index(X,Y)):-
ask_mode_of_user_specified_choice(REASON),
a_user_specified_choice(G,P,REASON,index(X,Y)).
a_user_specified_choice(G,P,user(_),index(X,Y)):-
% current_play_step(G,step(T)),
% pp_board(play(G,step(T))),
nl,
write(' Where to put a stone?'),
nl,tab(2),
write('row: '),
read(X1),
nl,tab(2),
write('column: '),
read(Y1),
indexed_board(G,index(X1,Y1),Q),
(
var(Q)->(X=X1,Y=Y1)
;
nl,
write('WARNING: The position has been used. Please specify another. '),
a_user_specified_choice(G,P,user(_),index(X,Y))
).
a_user_specified_choice(G,P,REASON,index(X,Y)):-
%REASON \= user(_),
strategy(G,P,_PlayHistory,REASON,(X,Y)).
display_menu_of_user_specified_choice:-
nl,
write(' Please select the most important factor from the menu below.'),
nl,
forall(
member(X,
[
'0. novice',
'1. avoid loss',
'2. seek win',
'3. specify directly',
'4. explorer',
'5. nasty',
'>'
]
),
(
nl,
tab(3),
write(X)
)
),
nl.
ask_mode_of_user_specified_choice(REASON):-
display_menu_of_user_specified_choice,
read(U),
MENU=
[
(0,mywin(_)),
(1,memory(loss_avoid(_))),
(2,memory(win_seek(_))),
(3,user(_REASON)),
(4,explorer(_)),
(5,nasty(_,_))
],
(
member((U,REASON),MENU)
->
true
;
(
nl,
write('not available.'),
ask_mode_of_user_specified_choice(REASON)
)
).
%---------------------------------
% auto start of game play
%---------------------------------
% modified: 24 Aug 2003.
auto_start:-
select_agent,
auto_start0.
auto_start0:-
game_play(start,step(Time),HISTORY,STATUS),
nl,
forall(
member(X,
[
(step=Time),
(history=HISTORY),
(result=STATUS)
]
),
(
nl,
tab(2),
write(X)
)
),
nl,
write(' go ahead? (y to continue) > '),
read(y),
auto_start0.
auto_start0.
:- dynamic auto_count/1.
auto_count(0).
update_auto_count:-
retract(auto_count(N)),
N1 is N + 1,
assert(auto_count(N1)),
!.
update_auto_count:-
abolish(auto_count/1),
assert(auto_count(0)).
auto_learn:-
game_play(start,step(Time),HISTORY,STATUS),
nl,
forall(
member(X,
[
(step=Time),
(history=HISTORY),
(result=STATUS)
]
),
(
nl,
tab(2),
write(X)
)
),
nl,
auto_count(N),
(
(
N > 100,
write(' stop ? >'),
read(y)
)
-> fail
; true
),
update_auto_count,
auto_learn.
auto_learn.
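% Typical use: auto_learn/0 accumulates learner_data/2 records by
% repeated self-play (offering to stop once auto_count/1 exceeds 100);
% the memory based agents then exploit those records via auto_start/0.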
:- auto_start.
/*
%-------------------------------%
% ruins
%-------------------------------%
%------------------
% patterns of win (alternative)
%------------------
win_pattern(G,P,vertical,LMR):-
game(G),
player(P),
column(C),
vertical_pattern(G,C,VERTICAL),
sort(VERTICAL,[P]),
meaning(vertical,C,LMR).
win_pattern(G,P,lateral,UMD):-
game(G),
player(P),
row(R),
lateral_pattern(G,R,LATERAL),
sort(LATERAL,[P]),
meaning(lateral,R,UMD).
win_pattern(G,P,diagonal,U_RL):-
game(G),
player(P),
diagonal_pattern(G,U_RL,DIAGONAL),
sort(DIAGONAL,[P]).
meaning(vertical,a,left).
meaning(vertical,b,middle).
meaning(vertical,c,right).
meaning(lateral,1,up).
meaning(lateral,2,middle).
meaning(lateral,3,down).
vertical_pattern(G,C,VERTICAL):-
game(G),
column(C),
findall(Q,
indexed_board(G,index(_R,C),Q),
VERTICAL).
lateral_pattern(G,R,LATERAL):-
game(G),
row(R),
findall(Q,
indexed_board(G,index(R,_C),Q),
LATERAL).
diagonal_pattern(G,U_RL,Pattern):-
member(Pattern,[upper_right,upper_left]),
diagonal_pattern_1(G,DIAG,Pattern),
findall(Q,member((_,_,Q),DIAG),U_RL).
diagonal_pattern_1(G,U_RL,upper_left):-
diagonal_pattern(G,RL_U,upper_left,[]),
!,
reverse(RL_U,U_RL).
diagonal_pattern_1(G,U_RL,upper_right):-
game(G),
board(G,columns,COL),
diagonal_pattern(G,U_RL,upper_right,COL),
!.
diagonal_pattern(G,[],upper_left,COL):-
game(G),
board(G,columns,COL).
diagonal_pattern(G,[(R,C,Q)|B],upper_left,COL):-
diagonal_pattern(G,B,upper_left,[C|COL]),
indexed_board(G,index(R,C),Q),
\+ (member((R1,_,_),B),(R=R1;R
*/