
:-O=['% Theory of noncooperative games in Prolog:  '
,'% recursive solver for Nash equilibrium and so on'
,'% -------------------------------------------------'
],forall(member(X,O),(nl,write(X))).

% file: nash01.pl 
% modified: 15-29 Jan 2005.
% This program is a version of nash1.pl (Feb 2003).


%--------------------------------------------
%  strategic form games, modelbase, and analysis
%--------------------------------------------

%baseline_game( g(_), on).

:- dynamic is_permissible_to_analyze_recursively/3.

is_permissible_to_analyze_recursively( nash(G),G,off).
is_permissible_to_analyze_recursively( dom( G),G,off).
is_permissible_to_analyze_recursively( idom(w, G),G,off).
is_permissible_to_analyze_recursively( idom(s, G),G,off).

permissible_level_of_recursion( 1).
permissible_level_of_recursion( 2).
%permissible_level_of_recursion( 3).
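
% The cap above bounds how many layers of analysis (nash/1, dom/1,
% idom/2, ...) game/4 may stack on a base game. A sketch, assuming
% at least one of the switches above has been set to on:
/*
?- game( G, form( (G0,H,K) ), players(N), acts(A)), K > 0.
*/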


% games permitted (for recursive construction)
%--------------------------------------------

game( G, form( (G, [],0) ),players(N),acts(A)):-
   %baseline_game( G, on),
   game_info( G, form( standard), players(N),acts(A)).

game( G, form( mix ),players(N),acts(A)):-
   game_info( G, form( mix), players(N),acts(A)).


% lifting the analysis up one level.

game( G, form( (G0,H,K) ),players(N),acts(A)):-
   permissible_level_of_recursion( K),
   K1 is K -1,
   game_info( G, form( (G0, H, K1) ),players(N),acts(A)).

game( G, players(N),acts(S),payoffs(U)):-
   game(G,form(_),players(N),_),
   game_info(G,payoff,S,U).


% strategies and payoffs
%--------------------------------------------

strategy_space_of_player( G, J,S):-
   game_info(G, form(_), players(_), acts(A)),
   member((J,S),A).

possible_strategy_of_player( G, J,X):-
   strategy_space_of_player( G, J,S),
   member(X,S).

payoff_of_game( G, A, P, (J,S,U)):-
   game(G,players(N),acts(A),payoffs(P)),
   nth1(K,N,J),
   nth1(K,A,S),
   nth1(K,P,U).
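
% An illustrative query against g(0), defined below: payoff_of_game/4
% decomposes an act profile into (player, act, payoff) triples.
% With the default switches it should behave as follows (a sketch):
/*
?- payoff_of_game( g(0), [a1,b1], P, (J,S,U)).

P = [1, 1]
J = 1
S = a1
U = 1 ;

P = [1, 1]
J = 2
S = b1
U = 1 ;

No
?- 
*/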


% game modelbase
%--------------------------------------------

:- dynamic game_info/4.

% indexing games
%--------------------------------------------

% The base level, which
% consists of examples imported from fixpo.pl and nash1.pl.

game_info(
  g(K),
  form( standard),
  players([1,2]),
  acts([(1,[a1,a2]), (2,[b1,b2])])
 ):-
   member(K, [0,1,2,3,4,5,6]).

% The analysis:
% the solutions, or the extensions, of each game/4
% for which recursive analysis is permitted.

game_info( G, form((G0,[Z|H],K)), players(N), acts(A)):-
   is_permissible_to_analyze_recursively( G, Z, on),
   game( Z, form((G0,H,K)), players(N), acts(A)).
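
% For instance, switching nash/1 on,
%   is_permissible_to_analyze_recursively( nash(G), G, on),
% lets each solved game nash(g(K)) re-enter the modelbase one level
% up, bounded by permissible_level_of_recursion/1. A sketch
% (untested here):
/*
?- game( nash(g(1)), form( F), players( N), acts( A)).
*/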


% payoffs (i.e., outcome functions)
%--------------------------------------------

% g(0) a game with two pure strategy equilibria.
game_info( g(0), payoff,[a1,b1], [1,1]).
game_info( g(0), payoff,[a1,b2], [0,0]).
game_info( g(0), payoff,[a2,b1], [0,0]).
game_info( g(0), payoff,[a2,b2], [0,0]).

% g(1) a game with two pure strategy equilibria.
game_info( g(1), payoff,[a1,b1], [2,1]).
game_info( g(1), payoff,[a1,b2], [0,0]).
game_info( g(1), payoff,[a2,b1], [0,0]).
game_info( g(1), payoff,[a2,b2], [1,2]).

% g(2) a game with a pure strategy equilibrium.
game_info( g(2), payoff,[a1,b1], [1,3]).
game_info( g(2), payoff,[a1,b2], [1,0]).
game_info( g(2), payoff,[a2,b1], [2,0]).
game_info( g(2), payoff,[a2,b2], [3,1]).

% g(3) a game with no pure strategy equilibrium.
game_info( g(3), payoff,[a1,b1], [0,3]).
game_info( g(3), payoff,[a1,b2], [1,0]).
game_info( g(3), payoff,[a2,b1], [2,0]).
game_info( g(3), payoff,[a2,b2], [0,1]).

% g(4) a game with no pure strategy equilibrium.
game_info( g(4), payoff,[a1,b1], [0,1]).
game_info( g(4), payoff,[a1,b2], [1,0]).
game_info( g(4), payoff,[a2,b1], [1,0]).
game_info( g(4), payoff,[a2,b2], [0,1]).

% g(5) a game with two pure strategy equilibria.
game_info( g(5), payoff,[a1,b1], [1,5]).
game_info( g(5), payoff,[a1,b2], [0,4]).
game_info( g(5), payoff,[a2,b1], [0,0]).
game_info( g(5), payoff,[a2,b2], [1,1]).

% g(6) a game with no pure strategy equilibrium.
game_info( g(6), payoff,[a1,b1], [1,1]).
game_info( g(6), payoff,[a1,b2], [2,0]).
game_info( g(6), payoff,[a2,b1], [2,0]).
game_info( g(6), payoff,[a2,b2], [1,1]).


% g(40) a 2-person game of standard form (which corresponds
% to a game tree).

game_info( g(40),
  form(standard),
  players([1,2]),
  acts([
    (1,[(a1,l),(a1,r),(b1,l),(b1,r)]),
    (2,[a2,b2])
  ])
).

game_info( g(40), payoff,[(a1,l),a2],[2,5]).
game_info( g(40), payoff,[(a1,r),a2],[2,5]).
game_info( g(40), payoff,[(b1,l),a2],[4,1]).
game_info( g(40), payoff,[(b1,r),a2],[0,0]).
game_info( g(40), payoff,[(a1,l),b2],[2,5]).
game_info( g(40), payoff,[(a1,r),b2],[2,5]).
game_info( g(40), payoff,[(b1,l),b2],[0,0]).
game_info( g(40), payoff,[(b1,r),b2],[1,4]).

/* 
game tree of g(40) (cf. Bicchieri (1993), p.101, figure 3.6)
         b1         b2
  [1]------->[2]------>[1]----->[1,4]
a1 |          |     .   |     r
   |        a2|   .   l V
   V          | .      [0,0]
 [2,5]        V.  r
       [1]----->[0,0]
              |
            l V
             [4,1]
*/


% g(30) a 3-person game of standard form 

game_info( g(30),
  form(standard),
  players([1,2,3]),
  acts([
    (1,[a1,a2]),
    (2,[b1,b2]),
    (3,[c1,c2])
  ])
).

game_info( g(30), payoff,[a1,b1,c1],[2,1,0]).
game_info( g(30), payoff,[a1,b2,c1],[0,0,0]).
game_info( g(30), payoff,[a2,b1,c1],[0,0,0]).
game_info( g(30), payoff,[a2,b2,c1],[1,2,0]).
game_info( g(30), payoff,[a1,b1,c2],[1,2,1]).
game_info( g(30), payoff,[a1,b2,c2],[0,0,1]).
game_info( g(30), payoff,[a2,b1,c2],[0,0,1]).
game_info( g(30), payoff,[a2,b2,c2],[2,1,1]).


%--------------------------------------------
%  Making solutions of games, higher order games, ...
%--------------------------------------------


% Nash's equilibrium point (in pure strategy)
%--------------------------------------------

game_info( nash(G), payoff, S,U):-
   game(G,players(_),acts(S),payoffs(U)),
   nash_profile(G,S,U).


% an alternative (in the style of the earlier version)

game_info( nash1(G), payoff, S,U):-
   game(G,players(_),acts(S),payoffs(U)),
   \+ is_motivated_deviator(_,G,S,U,_).

/*
?- game_info( nash1(G),payoff, S,P),nl,write(G:S:P),fail.

g(0):[a1, b1]:[1, 1]
g(0):[a2, b2]:[0, 0]
g(1):[a1, b1]:[2, 1]
g(1):[a2, b2]:[1, 2]
g(2):[a2, b2]:[3, 1]
g(5):[a1, b1]:[1, 5]
g(5):[a2, b2]:[1, 1]
g(40):[ (b1, l), a2]:[4, 1]
g(40):[ (a1, l), b2]:[2, 5]
g(40):[ (a1, r), b2]:[2, 5]
g(30):[a1, b1, c2]:[1, 2, 1]
g(30):[a2, b2, c2]:[2, 1, 1]

No
?- 
*/


% dominance equilibrium.
%--------------------------------------------

game_info( dom(G), payoff, S,U):-
   game(G,players(_),acts(S),payoffs(U)),
   dominance_profile(G,S,U).

/*
?- game_info( dom(G), payoff, S,P),nl,write(G:S:P),fail.

g(0):[a1, b1]:[1, 1]

No
?-

*/

% iterated dominance
%--------------------------------------------

game_info( idom(w, G), payoff, S,U):-
   game(G,players(_),acts(S),payoffs(U)),
   T = weakly,
   undominated_profile(T, G,S,U).

game_info( idom(s, G), payoff, S,U):-
   game(G,players(_),acts(S),payoffs(U)),
   T = strictly,
   undominated_profile(T, G,S,U).

/*

?- game_info( idom(T,G), payoff, S,P),T=w,nl,write(T;G:S:P),fail.

w;g(0):[a1, b1]:[1, 1]
w;g(1):[a1, b1]:[2, 1]
w;g(1):[a1, b2]:[0, 0]
w;g(1):[a2, b1]:[0, 0]
w;g(1):[a2, b2]:[1, 2]
w;g(2):[a2, b1]:[2, 0]
w;g(2):[a2, b2]:[3, 1]
w;g(3):[a1, b1]:[0, 3]
w;g(3):[a1, b2]:[1, 0]
w;g(3):[a2, b1]:[2, 0]
w;g(3):[a2, b2]:[0, 1]
w;g(4):[a1, b1]:[0, 1]
w;g(4):[a1, b2]:[1, 0]
w;g(4):[a2, b1]:[1, 0]
w;g(4):[a2, b2]:[0, 1]
w;g(5):[a1, b1]:[1, 5]
w;g(5):[a1, b2]:[0, 4]
w;g(5):[a2, b1]:[0, 0]
w;g(5):[a2, b2]:[1, 1]
w;g(6):[a1, b1]:[1, 1]
w;g(6):[a1, b2]:[2, 0]
w;g(6):[a2, b1]:[2, 0]
w;g(6):[a2, b2]:[1, 1]
w;g(40):[ (b1, l), a2]:[4, 1]
w;g(40):[ (b1, l), b2]:[0, 0]
w;g(30):[a1, b1, c2]:[1, 2, 1]
w;g(30):[a1, b2, c2]:[0, 0, 1]
w;g(30):[a2, b1, c2]:[0, 0, 1]
w;g(30):[a2, b2, c2]:[2, 1, 1]

No
?-

% After modifying the switch clause (about line 21) as follows:
% is_permissible_to_analyze_recursively( idom(w, G),G,on).

?- game_info( nash(G),payoff,S,P),nl,write(G:S:P),fail.

g(0):[a1, b1]:[1, 1]
g(0):[a2, b2]:[0, 0]
g(1):[a1, b1]:[2, 1]
g(1):[a2, b2]:[1, 2]
g(2):[a2, b2]:[3, 1]
g(5):[a1, b1]:[1, 5]
g(5):[a2, b2]:[1, 1]
g(40):[ (b1, l), a2]:[4, 1]
g(40):[ (a1, l), b2]:[2, 5]
g(40):[ (a1, r), b2]:[2, 5]
g(30):[a1, b1, c2]:[1, 2, 1]
g(30):[a2, b2, c2]:[2, 1, 1]
idom(w, g(0)):[a1, b1]:[1, 1]
idom(w, g(1)):[a1, b1]:[2, 1]
idom(w, g(1)):[a2, b2]:[1, 2]
idom(w, g(2)):[a2, b2]:[3, 1]
idom(w, g(5)):[a1, b1]:[1, 5]
idom(w, g(5)):[a2, b2]:[1, 1]
idom(w, g(40)):[ (b1, l), a2]:[4, 1]
idom(w, g(30)):[a1, b1, c2]:[1, 2, 1]
idom(w, g(30)):[a2, b2, c2]:[2, 1, 1]
idom(w, idom(w, g(0))):[a1, b1]:[1, 1]
idom(w, idom(w, g(1))):[a1, b1]:[2, 1]
idom(w, idom(w, g(1))):[a2, b2]:[1, 2]
idom(w, idom(w, g(2))):[a2, b2]:[3, 1]
idom(w, idom(w, g(5))):[a1, b1]:[1, 5]
idom(w, idom(w, g(5))):[a2, b2]:[1, 1]
idom(w, idom(w, g(40))):[ (b1, l), a2]:[4, 1]
idom(w, idom(w, g(30))):[a1, b1, c2]:[1, 2, 1]
idom(w, idom(w, g(30))):[a2, b2, c2]:[2, 1, 1]

No
?-
*/

% mixed equilibrium (NE of the mixed extension)
%--------------------------------------------

game_info( mix(G), form(mix), players(N),acts(B)):-
   mixed_extension( G,players(N),base(_A),prob(B)).

game_info( mix(G), payoff, S,U):-
   mixed_extension( G,players(_),acts(S),payoffs(U)).

/*

?- game_info(mix(g(0)),A,B,C).

A = form(mix)
B = players([1, 2])
C = acts([ (1, [[ (a1, 0), (a2, 100)], [ (a1, 20), (a2, 80)], [ (a1, 40), (a2, 60)], [ (a1, 60), (..., ...)], [ (..., ...)|...], [...|...]]), (2, [[ (b1, 0), (b2, 100)], [ (b1, 20), (b2, 80)], [ (b1, 40), (..., ...)], [ (..., ...)|...], [...|...]|...])]) ;

A = payoff
B = [ (1, [ (a1, 0), (a2, 100)]), (2, [ (b1, 0), (b2, 100)])]
C = [0, 0] 

Yes
?- game_info(nash(mix(g(0))),A,B,C).

A = payoff
B = [ (1, [ (a1, 0), (a2, 100)]), (2, [ (b1, 0), (b2, 100)])]
C = [0, 0] ;

A = payoff
B = [ (1, [ (a1, 100), (a2, 0)]), (2, [ (b1, 100), (b2, 0)])]
C = [1, 1] ;

No
?- 

*/

%--------------------------------------------
%  Tools for the game analysis
%--------------------------------------------


% best response strategy
%--------------------------------------------

is_unilaterally_deviated(S,T,[A,B],[X,Y]):-
   append(A,[X|B],S),
   append(A,[Y|B],T).

is_motivated_deviator( J,G,S,U,([T,V],[X->Y],[Uj->Vj])):-
   payoff_of_game( G, S, U, (J,X,Uj)),
   payoff_of_game( G, T, V, (J,Y,Vj)),
   Uj < Vj,
   is_unilaterally_deviated(S,T,_,[X,Y]).


best_response( (J,X,Uj),G,S,U):-
   payoff_of_game( G, S, U, (J,X,Uj)),
   \+ is_motivated_deviator( J,G,S,U,_).
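
% An illustrative query against g(1): at [a1,b2] each player has a
% profitable unilateral deviation, so neither act is a best response
% there. Expected answers (a sketch under the default modelbase):
/*
?- is_motivated_deviator( J, g(1), [a1,b2], [0,0], D).

J = 1
D = [[a2, b2], [1, 2]], [a1->a2], [0->1] ;

J = 2
D = [[a1, b1], [2, 1]], [b2->b1], [0->1] ;

No
?- 
*/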


% The recursive version, added in this revision.
%--------------------------------------------

nash_profile(G,S,U):-
   game_info(G,form(_),players(N),_),
   nash_profile_1( (G,S,U,N),S,U,N).

nash_profile_1( _,[],[],[]).
nash_profile_1( (G,S,U,N),[X|B],[Uj|V],[J|R]):-
   nash_profile_1( (G,S,U,N),B,V,R),
   best_response( (J,X,Uj),G,S,U).
 

/*

?- nash_profile(G,S,P),nl,write(G:S:P),fail.

g(0):[a1, b1]:[1, 1]
g(0):[a2, b2]:[0, 0]
g(1):[a1, b1]:[2, 1]
g(1):[a2, b2]:[1, 2]
g(2):[a2, b2]:[3, 1]
g(5):[a1, b1]:[1, 5]
g(5):[a2, b2]:[1, 1]
g(40):[ (b1, l), a2]:[4, 1]
g(40):[ (a1, l), b2]:[2, 5]
g(40):[ (a1, r), b2]:[2, 5]
g(30):[a1, b1, c2]:[1, 2, 1]
g(30):[a2, b2, c2]:[2, 1, 1]

No
?- 
*/


%--------------------------------------------
% dominance strategy profile
%--------------------------------------------

dominance_profile(G,S,U):-
   game_info(G,form(_),players(N),_),
   dominance_profile_1( (G,S,N),S,N),
   game_info(G,payoff,S,U).

dominance_profile_1( _,[],[]).
dominance_profile_1( (G,S,N),[X|B],[J|R]):-
   dominance_profile_1( (G,S,N),B,R),
   dominance( J,X,G).
 
dominance( J,X,G):-
   possible_strategy_of_player( G, J,X),
   \+ (
     payoff_of_game( G, S, U, (J,X,Uj)),
     \+ best_response( (J,X,Uj),G,S,U)
   ).



/*

% with every is_permissible_to_analyze_recursively/3 switch set to off.

?- dominance( J,X,G), nl,write(G:J:X),fail.

g(0):1:a1
g(0):2:b1
g(2):1:a2
g(30):3:c2

No
?- 
*/

 
%--------------------------------------------
%  iterated dominance (against pure strategy)
%--------------------------------------------

is_dominated_by( strictly, J,X,G):-
   possible_strategy_of_player( G, J,X),
   forall(
     payoff_of_game( G, S, U, (J,X,Uj)),
     \+ best_response( (J,X,Uj),G,S,U)
   ).

is_dominated_by( weakly, J,X,G):-
   possible_strategy_of_player( G, J,X),
   \+ \+ (
     payoff_of_game( G, S, U, (J,X,Uj)),
     is_motivated_deviator( J,G,S,U,_)
   ),
   forall(
     payoff_of_game( G, S, U, (J,X,Uj)),
     is_weakly_motivated_deviator( J,G,S,U,_)
   ).

is_weakly_motivated_deviator( J,G,S,U,([T,V],[X->Y],[Uj->Vj])):-
   payoff_of_game( G, S, U, (J,X,Uj)),
   payoff_of_game( G, T, V, (J,Y,Vj)),
   Uj =< Vj,
   is_unilaterally_deviated(S,T,_,[X,Y]).


% strategies surviving one round, or iterated rounds, of the
% elimination of dominated actions (weakly or strictly).

undominated_profile(T,G,S,P):-
   member(T,[weakly,strictly]),
   game(G,players(N),acts(S),payoffs(P)),
   undominated_profile_1(T,G,N,S).

dominated_profile(T,G,S,P):-
   member(T,[weakly,strictly]),
   game(G,players(N),acts(S),payoffs(P)),
   \+ undominated_profile_1(T,G,N,S).

undominated_profile_1(_,_,[],[]).
undominated_profile_1(T,G,[J|N],[X|S]):-
   undominated_profile_1(T,G,N,S),
   \+ is_dominated_by(T,J,X,G).


/*

?- is_dominated_by(T,J,X,G),T=strictly,nl,write(T;G;J;X),fail.

strictly;g(2);1;a1
strictly;g(40);1;b1, r
strictly;g(30);3;c1

No
?- is_dominated_by(T,J,X,G),T\=strictly,nl,write(T;G;J;X),fail.

weakly;g(0);1;a2
weakly;g(0);2;b2
weakly;g(2);1;a1
weakly;g(40);1;a1, l
weakly;g(40);1;a1, r
weakly;g(40);1;b1, r
weakly;g(30);3;c1

No
?- undominated_profile(T,G,S,P),member(G,[g(0),g(40)]),nl,write(T;G;S;P),fail.

weakly;g(0);[a1, b1];[1, 1]
weakly;g(40);[ (b1, l), a2];[4, 1]
weakly;g(40);[ (b1, l), b2];[0, 0]
strictly;g(0);[a1, b1];[1, 1]
strictly;g(0);[a1, b2];[0, 0]
strictly;g(0);[a2, b1];[0, 0]
strictly;g(0);[a2, b2];[0, 0]
strictly;g(40);[ (a1, l), a2];[2, 5]
strictly;g(40);[ (a1, r), a2];[2, 5]
strictly;g(40);[ (b1, l), a2];[4, 1]
strictly;g(40);[ (a1, l), b2];[2, 5]
strictly;g(40);[ (a1, r), b2];[2, 5]
strictly;g(40);[ (b1, l), b2];[0, 0]

No
?- 
*/

%--------------------------------------------
% mixed extension and expected payoffs
%--------------------------------------------

% mixed extension
%--------------------------------------------

mixed_extension( G,players(N),base(A),prob(B)):-
   game( G, form((_,[],0)), players(N),acts(A)),
   findall((J,PjL),
    (
     member(J,N),
     findall(Pj,
       mixed_strategy_of_player(G,J,_,_,Pj),
     PjL)
    ),
   B).


mixed_extension(G,players(N),acts(P),payoffs(V)):-
   mixed_strategy_profile(G,N,_S,P),
   findall((A, E),
    (
     exp_payoffs_at_act_profile(G,P,A,_Pa,E)
    ),
   EL),
   mixed_extension_1( EL, N, V).

mixed_extension_1( _, [], []).
mixed_extension_1( EL, [J|N], [Vj|V]):-
   mixed_extension_1( EL, N, V),
   findall(Vj,
    (
     member((_A,E),EL),
     member((J,Vj),E)
    ),
   S),
   sum(S, Vj).

/*

No
?- mixed_extension(g(0),players([1,2]),acts([P,Q]),payoffs(E)),
member(V,E),V>0.7.

P = [ (a1, 100), (a2, 0)]
Q = [ (b1, 80), (b2, 20)]
E = [0.8, 0.8]
V = 0.8 ;

P = [ (a1, 100), (a2, 0)]
Q = [ (b1, 80), (b2, 20)]
E = [0.8, 0.8]
V = 0.8 ;

P = [ (a1, 80), (a2, 20)]
Q = [ (b1, 100), (b2, 0)]
E = [0.8, 0.8]
V = 0.8 ;

P = [ (a1, 80), (a2, 20)]
Q = [ (b1, 100), (b2, 0)]
E = [0.8, 0.8]
V = 0.8 ;

P = [ (a1, 100), (a2, 0)]
Q = [ (b1, 100), (b2, 0)]
E = [1, 1]
V = 1 ;

P = [ (a1, 100), (a2, 0)]
Q = [ (b1, 100), (b2, 0)]
E = [1, 1]
V = 1 ;

No
?- 

*/


% expected payoff given a mixed strategy profile
%--------------------------------------------

exp_payoffs_at_act_profile(G,P,A,Pa,E):-
   mixed_strategy_profile(G,N,_S,P),
   game(G,players(N),acts(A),payoffs(U)),
   scale_for_unit_probability( Z),   
   exp_payoff_1(E,N,A,U,P,Z,Z,Pa,_).

% simultaneous (state) probability and the expected values
 
exp_payoff_1([],[],[],[],[],_,Pa,Pa,[]).
exp_payoff_1([(J,Vj)|E],[J|N],[Aj|A],[Uj|U],[Pj|P],Z,Q,Pa,[Paj|R]):-
   member((Aj,Paj),Pj),
   Q1 is Q * Paj /Z,
   exp_payoff_1(E,N,A,U,P,Z,Q1,Pa,R),
   Vj is Uj * Pa /Z.
   

/*

?- exp_payoffs_at_act_profile(g(0),[P,Q],A,Pa,E),
member((_,V),E),V>0.8.

P = 1, [ (a1, 100), (a2, 0)]
Q = 2, [ (b1, 100), (b2, 0)]
A = [a1, b1]
Pa = 100
E = [ (1, 1), (2, 1)]
V = 1 ;

P = 1, [ (a1, 100), (a2, 0)]
Q = 2, [ (b1, 100), (b2, 0)]
A = [a1, b1]
Pa = 100
E = [ (1, 1), (2, 1)]
V = 1 ;

No
?- 

*/



% profile of mixed strategies
%--------------------------------------------

mixed_strategy_profile(G,N,S,P):-
   game_info(G,form(standard),players(N),acts(_)),
   mixed_strategy_profile_1(G,N,S,P,N).

mixed_strategy_profile_1(_,_,[],[],[]).

mixed_strategy_profile_1(G,N,[(J,Aj,S)|T],[P|Q],[J|N1]):-
   mixed_strategy_profile_1(G,N,T,Q,N1),
   mixed_strategy_of_player(G,J,Aj,S,P).

/*

?- mixed_strategy_profile(g(0),[1,2],[S,T],[P,Q]).

S = 1, [a1, a2], [20, 80]
T = 2, [b1, b2], [20, 80]
P = [ (a1, 20), (a2, 80)]
Q = [ (b1, 20), (b2, 80)] ;

S = 1, [a1, a2], [40, 60]
T = 2, [b1, b2], [20, 80]
P = [ (a1, 40), (a2, 60)]
Q = [ (b1, 20), (b2, 80)] 

Yes
?- 
*/


% mixed strategy --- its two interpretations:
% (1) the probability with which the player randomizes.
% (2) the players' (common) belief about the collective behavior.
%--------------------------------------------

mixed_strategy_of_player(G,J,Aj,P,C):-
   game_info(G,form(standard),players(_),acts(A)),
   member((J,Aj),A),
   scale_for_unit_probability( Unit),
   mixed_strategy_of_player_1(Aj,P,C,Unit).

mixed_strategy_of_player_1([],[],[],O):- measure_zero(O).
mixed_strategy_of_player_1([X|S],[Y|P],[(X,Y)|C],Q):-
   probability_value(Y),
   R is Q - Y,
   mixed_strategy_of_player_1(S,P,C,R).

measure_zero(O):- O*O < 1.0e-10.   % the remaining mass is numerically zero

/*

?- mixed_strategy_of_player(g(0),1,Aj,P,C).
Aj = [a1, a2]
P = [0, 100]
C = [ (a1, 0), (a2, 100)] ;

Aj = [a1, a2]
P = [20, 80]
C = [ (a1, 20), (a2, 80)] 

Yes
?- 
*/

% expected payoff given a degenerated (point-mass) probability
%--------------------------------------------

exp_payoff_of_pure_strategy(G,P,A,Pa,E,(J,Aj,Ej)):-
   mixed_strategy_profile_but_for_J(G,N,_S,P,(J,Aj)),
   game(G,players(N),acts(A),payoffs(U)),
   scale_for_unit_probability( Z),   
   exp_payoff_1(E,N,A,U,P,Z,Z,Pa,_),
   member( (J,Ej),E).

/*
?- exp_payoff_of_pure_strategy(g(0),[P,Q],A,Pa,E,X),X=(_,_,V),V>0.5.

P = 1, [ (a1, 60), (a2, 40)]
Q = 2, [ (b1, 100), (b2, 0)]
A = [a1, b1]
Pa = 60
E = [ (1, 0.6), (2, 0.6)]
X = 2, b1, 0.6
V = 0.6 ;

P = 1, [ (a1, 80), (a2, 20)]
Q = 2, [ (b1, 100), (b2, 0)]
A = [a1, b1]
Pa = 80
E = [ (1, 0.8), (2, 0.8)]
X = 2, b1, 0.8
V = 0.8 ;

P = 1, [ (a1, 100), (a2, 0)]
Q = 2, [ (b1, 100), (b2, 0)]
A = [a1, b1]
Pa = 100
E = [ (1, 1), (2, 1)]
X = 2, b1, 1
V = 1 

Yes
?- 

*/


% mixed strategy profile, assuming player J's pure strategy is Aj.
%--------------------------------------------

mixed_strategy_profile_but_for_J(G,N,S,P,(J,Aj)):-
   game_info(G,form(standard),players(N),acts(_)),
   mixed_strategy_profile_2(G,N,S,P,N,(J,Aj)).

mixed_strategy_profile_2(_,_,[],[],[],_).

mixed_strategy_profile_2(G,N,[(J,Aj,S)|T],[P|Q],[J|N1],(I,Xj)):-
   mixed_strategy_profile_2(G,N,T,Q,N1,(I,Xj)),
   J \= I, 
   mixed_strategy_of_player(G,J,Aj,S,P).

mixed_strategy_profile_2(G,N,[(J,Aj,S)|T],[P|Q],[J|N1],(J,Xj)):-
   mixed_strategy_profile_2(G,N,T,Q,N1,(J,Xj)),
   degenerated_mixed_strategy_of_player(G,J,Aj,S,P,Xj).


/*

?- mixed_strategy_profile_but_for_J(g(0),[1,2],[S,T],[P,Q],JAj).

S = 1, [a1, a2], [0, 100]
T = 2, [b1, b2], [100, 0]
P = [ (a1, 0), (a2, 100)]
Q = [ (b1, 100), (b2, 0)]
JAj = 2, b1 ;

S = 1, [a1, a2], [20, 80]
T = 2, [b1, b2], [100, 0]
P = [ (a1, 20), (a2, 80)]
Q = [ (b1, 100), (b2, 0)]
JAj = 2, b1 

Yes
?- 
*/


% degenerated mixed strategy (a point mass on one pure strategy)
%--------------------------------------------

degenerated_mixed_strategy_of_player(G,J,Aj,P,C, Xj):-
   strategy_space_of_player( G, J,Aj),
   scale_for_unit_probability( Unit),
   degenerated_mixed_strategy_of_player_1(Aj,P,C,Unit, Xj).

degenerated_mixed_strategy_of_player_1([],[],[],0, _).
degenerated_mixed_strategy_of_player_1([X|S],[Unit|P],[(X,Unit)|C],Unit,X):-
   degenerated_mixed_strategy_of_player_1(S,P,C,0,X).
degenerated_mixed_strategy_of_player_1([X|S],[0|P],[(X,0)|C],Q,Xj):-
   degenerated_mixed_strategy_of_player_1(S,P,C,Q,Xj).

/*

?- degenerated_mixed_strategy_of_player(g(0),1,Aj,P,C, Xj).

Aj = [a1, a2]
P = [100, 0]
C = [ (a1, 100), (a2, 0)]
Xj = a1 ;

Aj = [a1, a2]
P = [0, 100]
C = [ (a1, 0), (a2, 100)]
Xj = a2 ;

No
?- 

*/

% Nash's continuous map (Nash, 1951)
%--------------------------------------------
%  For each player j, let x[j,k], k=1,...,m_j, be the probability
% of his k-th pure strategy s_j[k]. It is adjusted as follows:
%
%   x'[j,k] = (x[j,k] + d[j,k]) / (1 + d[j,1] + ... + d[j,m_j])
% where
%   d[j,k] = max( v_j( x / s_j[k]) - v_j( x), 0).
% (Each d[j,k] is nonnegative, so no absolute values are needed;
% cf. sum/2 in nash_map_updating_profile/4 below.)
%
% Here x / s_j[k] denotes the probability profile x with its j-th
% coordinate degenerated to the point mass on s_j[k].
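
% A worked instance for g(0), consistent with the nash_map/5
% transcript further below. At x = ((a1:0.2, a2:0.8), (b1:0, b2:1)):
%   v_2( x) = 0.2 * 0 * 1 = 0,   v_2( x / b1) = 0.2 * 1 * 1 = 0.2,
% so d[2,b1] = 0.2 and d[2,b2] = 0, hence
%   x'[2,b1] = (0 + 0.2)/(1 + 0.2) = 1/6 (i.e., 16.6667 per 100),
%   x'[2,b2] = (1 + 0)  /(1 + 0.2) = 5/6 (i.e., 83.3333 per 100),
% while player 1's probabilities are left unchanged (d[1,k] = 0).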


iterated_nash_map( K, (G,N), PL, VL, Q, (P0,V0) ):-
   (integer(K);K=0),   % K, the number of iterations, should be given
   length( PL, K),
   %mixed_extension(G,players(N),acts(P0),payoffs(V0)),
   iterated_nash_map_1( K, (G,N), PL,VL, Q, (P0,V0)).

:- dynamic iterated_nash_map_0/6.

exist_iterated_nash_map_0( K1, (G, N), PL, VL, P, PV0):-
   clause(
     iterated_nash_map_0( K1, (G, N), PL, VL, P, PV0),
     true
   ).

iterated_nash_map_1( K1, (G, N), PL, VL, P, PV0):-
   exist_iterated_nash_map_0( K1, (G, N), PL, VL, P, PV0).

iterated_nash_map_1( 1, (G,N), [P], [V], Q, (P,V)):-
   \+ exist_iterated_nash_map_0( 1, (G, N), _, _, _, (P,V)),
   nash_map( G, N, P, Q, V),
   add_iterated_nash_map_0( 1, (G,N), [P], [V], Q, (P,V)).

iterated_nash_map_1( K, (G, N), [P|PL], [V|VL], Q, PV0):-
   \+ exist_iterated_nash_map_0( K, (G, N), _, _, _, PV0),
   K1 is K - 1,
   iterated_nash_map_1( K1, (G, N), PL, VL, P, PV0),
   nash_map( G, N, P, Q, V),
   add_iterated_nash_map_0( K, (G,N), [P|PL], [V|VL], Q, PV0).

add_iterated_nash_map_0( K, (G,N), PL, VL, Q, PV0):-
   assert(
     iterated_nash_map_0(K, (G,N), PL, VL, Q, PV0)
   ).


/*

?- iterated_nash_map( 3, (g(0),[1,2]), [P3,P2,P1], VL, Q, (P0,V0) ).

P3 = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)]]
P2 = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)]]
P1 = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)]]
VL = [[0, 0], [0, 0], [0, 0]]
Q = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)]]
P0 = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)]]
V0 = [0, 0] ;

P3 = [[ (a1, 81.6327), (a2, 18.3673)], [ (b1, 61.5385), (b2, 38.4615)]]
P2 = [[ (a1, 80), (a2, 20)], [ (b1, 44.4444), (b2, 55.5556)]]
P1 = [[ (a1, 80), (a2, 20)], [ (b1, 0), (b2, 100)]]
VL = [[0.502355, 0.502355], [0.355556, 0.355556], [0, 0]]
Q = [[ (a1, 83.4979), (a2, 16.5021)], [ (b1, 70.7288), (b2, 29.2712)]]
P0 = [[ (a1, 80), (a2, 20)], [ (b1, 0), (b2, 100)]]
V0 = [0, 0] 

Yes
?- iterated_nash_map( 2, (g(30),[1,2,3]), [P2,P1], VL, Q, (P0,V0) ).

P2 = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)], [ (c1, 0), (c2, 100)]]
P1 = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)], [ (c1, 0), (c2, 100)]]
VL = [[2, 1, 1], [2, 1, 1]]
Q = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)], [ (c1, 0), (c2, 100)]]
P0 = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)], [ (c1, 0), (c2, 100)]]
V0 = [2, 1, 1] 

Yes
?- 

*/


nash_map( G, N, P, Q, V):-
   directed_motivation_profile(G,N,P,V,D),
   nash_map_updating_profile(N, D, P, Q).

nash_map_updating_profile( [], [], [], []).
nash_map_updating_profile( [J|N], [Dj|D], [Pj|P], [Qj|Q]):-
   nash_map_updating_profile( N, D, P, Q),
   sum( [1|Dj], Cj),
   nash_map_updating_probability_of_player( J, Dj, Pj, Qj, Cj).

nash_map_updating_probability_of_player( _, [], [], [], _).
nash_map_updating_probability_of_player( J, [Dk|D], [(Ak,Pk)|P], [(Ak,Qk)|Q], C):-
   nash_map_updating_probability_of_player( J, D, P, Q, C),
   scale_for_unit_probability( U),   
   Qk is (Pk + U * Dk ) /C.

/*

?- nash_map( G, N, P, Q, V).

G = g(0)
N = [1, 2]
P = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)]]
Q = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)]]
V = [0, 0] ;

G = g(0)
N = [1, 2]
P = [[ (a1, 20), (a2, 80)], [ (b1, 0), (b2, 100)]]
Q = [[ (a1, 20), (a2, 80)], [ (b1, 16.6667), (b2, 83.3333)]]
V = [0, 0] ;

G = g(0)
N = [1, 2]
P = [[ (a1, 40), (a2, 60)], [ (b1, 0), (b2, 100)]]
Q = [[ (a1, 40), (a2, 60)], [ (b1, 28.5714), (b2, 71.4286)]]
V = [0, 0] 

Yes
?- 

*/


directed_motivation_profile(G,N,P,V,D):-
   mixed_extension(G,players(N),acts(P),payoffs(V)),
   directed_motivation_profile_1(G,(N,P,V),(N,P,V),D).

directed_motivation_profile_1(_,_,([],[],[]),[]).

directed_motivation_profile_1(G,(N,P,V),([J|H],[Pj|Q],[Vj|E]),[Dj|D]):-
   directed_motivation_profile_1(G,(N,P,V),(H,Q,E),D),
   directed_motivation_of_player( (G,N,P,V), (J, Pj,Vj,Dj)).

/*

?- directed_motivation_profile(g(0),[1,2],P,V,D).

P = [[ (a1, 0), (a2, 100)], [ (b1, 0), (b2, 100)]]
V = [0, 0]
D = [[0, 0], [0, 0]] ;

P = [[ (a1, 20), (a2, 80)], [ (b1, 0), (b2, 100)]]
V = [0, 0]
D = [[0, 0], [0.2, 0]] 

Yes
?- 

*/

directed_motivation_of_player( (G,N,P,V), (J, Pj,Vj,Dj)):-
   strategy_space_of_player( G, J, Aj),
   directed_motivation_of_player_1( (G,N,P,V), Aj, (J, Pj,Vj),Dj).

directed_motivation_of_player_1( _,[],_,[]).
directed_motivation_of_player_1( (G,N,P,V), [Ajk|A],(J,Pj,Vj),[Djk|Dj]):-
   directed_motivation_of_player_1( (G,N,P,V), A,(J,Pj,Vj),Dj),
   directed_motivation_of_pure_strategy( (G,N),(P,V),(J,Ajk,Pj,Vj),_,Djk).

/*

?- directed_motivation_of_player( (g(0),[1,2],P,[1,1]), JPVD).

P = [[ (a1, 100), (a2, 0)], [ (b1, 100), (b2, 0)]]
JPVD = 1, [ (a1, 100), (a2, 0)], 1, [0, 0] ;

P = [[ (a1, 100), (a2, 0)], [ (b1, 100), (b2, 0)]]
JPVD = 2, [ (b1, 100), (b2, 0)], 1, [0, 0] ;

No
?- 

*/

directed_motivation_of_pure_strategy( (G,N), (P,V),(J,Ajk,Pj,Vj),(P1,E),Dj):-
   (var(P)->mixed_extension(G,players(N),acts(P),payoffs(V));true),
   nth1(K,N,J),
   (var(Vj)->nth1(K,V,Vj);true),
   length([_|M],K),
   is_unilaterally_deviated(P,P1,[M,_],[Pj,_Qj]),
   mixed_strategy_profile_but_for_J( G, N, _, P1,(J,Ajk)),
   mixed_extension(G,players(N),acts(P1),payoffs(E)),
   nth1(K,E,Ej),
   max( Dj, (Dj is 0; Dj is Ej - Vj)).

/*

?- directed_motivation_of_pure_strategy( GN, PV,JAV,QE,D),D>0.

GN = g(0), [1, 2]
PV = [[ (a1, 20), (a2, 80)], [ (b1, 0), (b2, 100)]], [0, 0]
JAV = 2, b1, [ (b1, 0), (b2, 100)], 0
QE = [[ (a1, 20), (a2, 80)], [ (b1, 100), (b2, 0)]], [0.2, 0.2]
D = 0.2 

Yes
?- 

*/



%--------------------------------------------
%  nash01 commons
%--------------------------------------------

% probability measure
%--------------------------------------------

default_precision_of_probability( intervals: 5).

scale_for_unit_probability( 100).

:- dynamic probability_value/1.


probability_value(P):-
   default_probability_value(P).

probability_value(0.5).  % This clause may be arbitrarily modified/added/removed if needed.

probability_value(P):-
   probability_value_of_user_specified(P).

default_probability_value(P):-
   all_default_probability_values(PL),
   member( P, PL).

probability_value_of_user_specified(P):-
   number(P),
   0 < P,
   scale_for_unit_probability( U),   
   U >= P,
   \+ default_probability_value(P).


%all_default_probability_values(PL):-
%   findall( P, probability_value(P), PL).

all_default_probability_values(PL):-
   default_precision_of_probability( intervals: M),
   scale_for_unit_probability( U), 
   probability_values_no_less_than(0, U/M, PL).

probability_values_no_less_than( U, U/_, [U]).
probability_values_no_less_than( C, S, [C|PL]):-
   C1 is C + S,
   probability_values_no_less_than( C1, S, PL),
   !.


/*
?- probability_value(I).

I = 0 ;

I = 20 ;

I = 40 ;

I = 60 ;

I = 80 ;

I = 100 ;

No
?- 
*/



% arithmetic
%--------------------------------------------

sum([],0).
sum([X|Members],Sum):-
   sum(Members,Sum1),
   Sum is Sum1 + X.

abs_sum([],0).
abs_sum([X|Members],Sum):-
   abs_sum(Members,Sum1),
   Sum is Sum1 + abs(X).

product([],1).
product([X|Members],Z):-
   product(Members,Z1),
   Z is Z1 * X.

sum_product([],[],[],0).
sum_product([P|Q],[A|B],[E|F],V):-
    length(Q,N),
    length(B,N),
    sum_product(Q,B,F,V1),
    E is P * A,
    V is V1 + E.

expected_value(P,W,F,E):-
   sum_product(P,W,F,E).
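
% A quick check of the weighted sum; it should yield, as expected:
/*
?- expected_value( [0.5,0.5], [2,0], F, E).

F = [1.0, 0.0]
E = 1.0 

Yes
?- 
*/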


%  max/ min in elements of a list
% ----------------------------------------  %

max_of(X,[X]).
max_of(Z,[X|Y]):-
   max_of(Z1,Y),
   (X > Z1 -> Z=X; Z=Z1).
min_of(X,[X]).
min_of(Z,[X|Y]):-
   min_of(Z1,Y),
   (X < Z1 -> Z=X; Z=Z1).

%  max/ min in successful goals
% ----------------------------------------  %
  % X: the objective variable,
  % Goal: the objective function and constraints,

min(X,Goal):-
  max(Z,(Goal,Z is -X)).

max(X,Goal):-
  setof((X,Goal),Goal,Z),
  member((X,Goal),Z),
  \+ (
    member((Y,_),Z),
    Y > X
  ).
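
% Usage sketch: maximize a variable over the solutions of a goal.
/*
?- max( X, member( X, [3,1,2])).

X = 3 

Yes
?- 
*/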


% list->tuple transformation
%--------------------------------------------

list_to_tuple( [X], X).
list_to_tuple( [X|Y], (X,Z)):-
   list_to_tuple( Y,Z).
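
% A small example (as expected):
/*
?- list_to_tuple( [a,b,c], T).

T = a, b, c 

Yes
?- 
*/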


% list pair of same length
%--------------------------------------------

list_of_same_length( A, B, K):-
   length( B, K),
   length( A, K).

% io stream: file output
%--------------------------------------------

tell_nash01(Goal,Format):-
   tell_nash01(Goal,Format,once).

tell_nash01(Goal,Format,Mode):-
   tell_nash01_open(S),
   tell_nash01_exec(Goal,Format,Mode),
   tell_nash01_close(S).

tell_nash01_open(S):-
  open('tell_nash01.txt',write,S),
  tell(S).

tell_nash01_exec(Goal,Format,once):-
  Goal->(nl,write(Format))
  ;write('no successful goals found.').

tell_nash01_exec(Goal,Format,forall):-
  Goal,nl,write(Format),fail.

tell_nash01_exec(_Goal,_Format,forall):-
  nl,write('no more successful goals found.').

tell_nash01_close(S):-
  current_stream('tell_nash01.txt',write,S),
  close(S),
  tell(user).
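
% Usage sketch (untested here): write every pure Nash profile to
% the file tell_nash01.txt in the working directory.
/*
?- tell_nash01( nash_profile( G, S, P), G:S:P, forall).
*/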



% save (oneshot) nash map data into csv file 
%--------------------------------------------

tell_once_nash_map(G,N,P,Q,Form):-
   N= [1,2],
   game_info( G, form( standard), players(N),acts(_)),
   G = g(I), member(I, [0,1,2,3,4,5,6]),
   P = [[ (a1, P_a1), (a2, P_a2)], [ (b1, P_b1), (b2, P_b2)]],
   Q = [[ (a1, Q_a1), (a2, Q_a2)], [ (b1, Q_b1), (b2, Q_b2)]],
   V = [V1, V2],
   Goal= nash_map( G, N, P, Q, V),
   Form= (V1, V2, P_a1, P_a2, P_b1, P_b2, Q_a1, Q_a2, Q_b1, Q_b2),
   tell_nash01( Goal, Form, once).

tell_forall_nash_map(G,N,Form):-
   N= [1,2],
   game_info( G, form( standard), players(N),acts(_)),
   G = g(I), member(I, [0,1,2,3,4,5,6]),
   P = [[ (a1, P_a1), (a2, P_a2)], [ (b1, P_b1), (b2, P_b2)]],
   Q = [[ (a1, Q_a1), (a2, Q_a2)], [ (b1, Q_b1), (b2, Q_b2)]],
   V = [V1, V2],
   Goal= nash_map( G, N, P, Q, V),
   Form= (V1, V2, P_a1, P_a2, P_b1, P_b2, Q_a1, Q_a2, Q_b1, Q_b2),
   tell_nash01( Goal, Form, forall).


% save iterated nash map data into csv file 
%--------------------------------------------

% Note: the practical limit on the number of iterations is about 100
% per query. Longer runs are possible by repeating the query,
% by virtue of the intermediate results that have been asserted.


tell_once_iterated_nash_map(K, G,N,( P0,V0)):-
   game_info( G, form( standard), players(N),acts(A)),
   Goal1 = iterated_nash_map( K, (G, N), PL, VL, Q, (P0,V0)),
   Goal2 = mixed_extension(G,players(N),acts(Q),payoffs(Vq)),
   Goal1,
   Goal2,
   Goal= (
     nth0(L, [Q|PL], P),
     nth0(L, [Vq|VL], V),
     K1 is K - L
   ),
   schema_for_tell_iterated_nash_map( [K1,A,P,V], Form),
   tell_nash01( Goal, Form, forall).

tell_forall_iterated_nash_map(K, G,N):-
   game_info( G, form( standard), players(N),acts(A)),
   Goal1 = iterated_nash_map( K, (G, N), PL, VL, Q, _PV0),
   Goal2 = mixed_extension(G,players(N),acts(Q),payoffs(Vq)),
   Goal= (
     Goal1,
     Goal2,
     nth0(L, [Q|PL], P),
     nth0(L, [Vq|VL], V),
     K1 is K - L
   ),
   schema_for_tell_iterated_nash_map( [K1,A,P,V], Form),
   tell_nash01( Goal, Form, forall).

schema_for_tell_iterated_nash_map( [K1,A,P,V], Form):-
   A = [(1, [a1, a2]), (2, [ b1, b2])],
   P = [[ (a1, P_a1), (a2, P_a2)], [ (b1, P_b1), (b2, P_b2)]],
   V = [V1, V2],
   Form= (K1, V1,V2, P_a1, P_a2, P_b1, P_b2).

/*

% An alternative, non-recursive construction.

schema_for_tell_iterated_nash_map( [K1,A,P,V], Form):-
   length(A, L),
   L > 2,
   findall( Pj,
    (
     member((_J,Aj),A),
     findall((Ajk,Pjk),member(Ajk, Aj),Pj)
    ),
   P),
   findall( Pjk,
    (
     member(Pj,P),
     member((_,Pjk), Pj)
    ),
   Q),
   flatten( [K1, V, Q], X),
   list_to_tuple( X, Form).
*/

schema_for_tell_iterated_nash_map( [K1,A,P,V], Form):-
   length(A, L),
   L > 2,
   length(V, L),
   select_probability_values( A, P, Q),
   flatten( [K1, V, Q], X),
   list_to_tuple( X, Form).

select_probability_values( [],[],[]).
select_probability_values( [(J,Aj)|A],[Pj|P],[Qj|Q]):-
   select_probability_values( A,P,Q),
   select_probabilities_for_player( J,Aj,Pj,Qj).

select_probabilities_for_player( _,[],[],[]).
select_probabilities_for_player( J,[Ajk|Aj],[(Ajk,Pjk)|Pj],[Pjk|Qj]):-
   select_probabilities_for_player( J,Aj,Pj,Qj).
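
% A sketch of the 2-person schema above, with illustrative numbers:
/*
?- schema_for_tell_iterated_nash_map(
     [2, [(1,[a1,a2]),(2,[b1,b2])],
      [[(a1,20),(a2,80)],[(b1,0),(b2,100)]], [0,0]], Form).

Form = 2, 0, 0, 20, 80, 0, 100 

Yes
?- 
*/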


% end.
