title:-
A=[
'% Discrete Approximation of Vickrey-Clarke-Groves Mechanism ',
'% program: pivotal.pl (SWI-Prolog 5.0.9)',
'% created: 27-28 Jun 2004.',
'% author: Kenryo Indo (Kanto Gakuen University)',
'% --------main predicates ---------',
'% go/0, pivotal/1, pivotal_data/2. ',
'% find_best_response_of/5, nash_equilibrium/1. ',
'% truthful/3, truth_telling_profile/1. ',
'% explain/0, explain_j/0, reference.'
], forall(member(L,A),(nl,write(L))),nl.
reference:-
A=[
'% reference:',
'% [1] Clarke, E. (1971). Multipart pricing of public goods. Public Choice 11: 17-33. ',
'% [2] Groves, T. (1973). Incentives in teams. Econometrica 41: 617-31. ',
'% [3] Vickrey, W. (1961). Counterspeculation, auctions, and competitive sealed tenders. Journal of Finance 16: 8-37. '
], forall(member(L,A),(nl,write(L))).
% generalization: with an arbitrary h[i] in (rule2) below this is the Groves mechanism;
% the pivotal (Clarke) mechanism implemented here is the special case.
explain:-
forall(explanation(E,F),(nl,write(E),nl, tab(2),write(F))).
explanation('******* Vickrey-Clarke-Groves (VCG) Mechanism *********','definitions').
explanation('cost of implementing the project: ','r.').
explanation('social decision: ','x = 1 (accept) or 0 (reject). ').
explanation('cost share or tax for individual i : ','m[i].').
explanation('utility function: ','u[i] = THETA[i,truth] * x - m[i]. (quasi-linear)').
explanation('strategy space: ','each i reports a real number THETA[i,report]. (direct revelation)').
explanation('the profile of reports: ','n-dimensional real vector THETA.').
explanation('social choice rule: ','f(THETA) = ( x(THETA), m(THETA) ).').
explanation('(rule1) ','x(THETA) = 1 if sum(THETA[i]) >= r, otherwise 0. ').
explanation('(rule2) ','m[i](THETA) = x(THETA) * (r - sum(THETA[k\=i])) + h[i](THETA[k\=i]).').
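% Added worked instance of (rule1)-(rule2) on the sample data below; the
% numbers agree with the session log near the end of this file. With
% threshold r = 0 and truthful reports THETA = [5,-3,0] for project x,
% sum(THETA) = 2 >= 0, so x(THETA) = 1 (accept). For agent a the other
% reports sum to -3 < 0, so a is pivotal and, as computed by pivotal_tax/4
% below, a's tax entry is -3 (the sum of the others' reports), giving a
% the payoff 5 - (-3) = 8, while b and c are not taxed.
% ?- go, pivotal_data([object:x,decision:D,surplus:S],
%                     [agent:[a,b,c],report:[5,-3,0],tax:T,payoff:V]).
% D = accept, S = 2, T = [-3,0,0], V = [8,-3,0].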
explain_j:-
forall(explanation_j(E,F),(nl,write(E),nl, tab(2),write(F))).
% explanation_j/2: Japanese version of explanation/2 (English glosses added as comments).
explanation_j('**** ヴィクレー=クラーク=グローブズ(VCG)のメカニズム ****','定義').  % VCG mechanism: definitions
explanation_j('プロジェクトの実施費用:','γ').  % cost of implementing the project: gamma (= r above)
explanation_j('プロジェクトの採否:','x=0または1').  % social decision: x = 0 or 1
explanation_j('個人iの費用負担ないし税額:','m[i]').  % cost share or tax for individual i: m[i]
explanation_j('効用関数:','u[i]=真のθ[i]x−m[i]').  % utility function: u[i] = true THETA[i] * x - m[i]
explanation_j('戦略:','実数θ[i]について表明、表明の組:n次元の実数ベクトルθ').  % strategy: report a real number THETA[i]; the profile is the n-dimensional real vector THETA
explanation_j('社会選択ルール:','f(θ)=(x(θ)、m(θ))').  % social choice rule: f(THETA) = (x(THETA), m(THETA))
explanation_j('配分ルール:','θ[i]の和≧γのとき、x(θ)=1。それ以外は0。').  % allocation rule: x(THETA) = 1 if sum(THETA[i]) >= gamma, otherwise 0
explanation_j('課税ルール:','m[i](θ)=x(θ)(γ−Σθ[j≠i])+h[i](θ[-i])').  % tax rule: m[i](THETA) = x(THETA)*(gamma - sum(THETA[j\=i])) + h[i](THETA[-i])
%-------------------------------------
% Script for Mechanisms
%-------------------------------------
:- dynamic pivotal_data/2.
clear:-
retractall(pivotal_data(_,_)). % rather than abolish/1, so the dynamic declaration is kept
go:-
clear,
go1.
go1:-
pivotal(_O),
fail.
go1:-
write(end).
%-------------------------------------
% Agents and Valuations
%-------------------------------------
agent(A):-agents(W),member(A,W).
% projects
object(A):-objects(W),member(A,W).
% same as example 1 in model_base/1.
agents([a,b,c]).
objects([x,y,z]).
% true valuation
% valuation(Agent, [Object], Utility) : the agent's true valuation of the project.
valuation(a,[x],5).
valuation(b,[x],-3).
valuation(c,[x],0).
valuation(a,[y],-2).
valuation(b,[y],5).
valuation(c,[y],0).
valuation(a,[z],-1).
valuation(b,[z],1).
valuation(c,[z],4).
% binary comparison.
prefer_to(Agent,O1,O2):-
valuation(Agent,O1,U1),
valuation(Agent,O2,U2),
U1 >= U2.
% strategy_of(+Agent,-Report): each agent may report any integer in -5..5 (11 alternatives).
strategy_of(A,B):-
agent(A),
length(L,11),
nth1(K,L,_),
B is (K-1) - 5.
strategy_profile_0([],[]).
strategy_profile_0([A|B],[C|D]):-
strategy_profile_0(B,D),
strategy_of(A,C).
strategy_profile(B):-
agents(A),
strategy_profile_0(A,B).
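% Added usage note: strategy_of/2 above gives each agent 11 admissible
% reports, so strategy_profile/1 enumerates 11^3 = 1331 report profiles,
% and go/0 records 3 x 1331 = 3993 pivotal_data/2 entries (cf. the
% session log below).
% ?- findall(P,strategy_profile(P),Ps), length(Ps,N).
% N = 1331.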
%------------------------------------
% Clarke's Project Choice Mechanism
%------------------------------------
pivotal(O):-
object(O),
agents(I),
strategy_profile_0(I,P),
decision_rule(P,SUM,D), %allocation function
pivotal_tax(P,SUM,D,T), % transfer function
payoff(O,I,T,V),
assert(
pivotal_data(
[object:O,decision:D,surplus:SUM],
[agent:I,report:P,tax:T,payoff:V]
)
).
% decision_rule(+P,-SUM,-D)
%---------------------------
decision_rule(P,SUM,D):-
threshold_surplus(H),
sum(P,SUM),
(SUM>=H->D=accept;D=reject).
threshold_surplus(0).
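% Added example: the decision rule applied to the truthful reports for
% project x.
% ?- decision_rule([5,-3,0],SUM,D).
% SUM = 2, D = accept.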
% pivotal_tax(+P,+SUM,+D,-T)
% T[K] is nonzero only when agent K is pivotal, i.e., when dropping K's
% report from the profile P would reverse the decision D; in that case
% T[K] is the sum of the other agents' reports, SUM - P[K].
%---------------------------
pivotal_tax(P,SUM,accept,T):-
threshold_surplus(H),
findall(TK,
(
nth1(_K,P,Q),
SQ is SUM-Q,
(SQ<H->TK is SQ;TK is 0)
),
T).
pivotal_tax(P,SUM,reject,T):-
threshold_surplus(H),
findall(TK,
(
nth1(_K,P,Q),
SQ is SUM-Q,
(SQ<H->TK is 0;TK is SQ)
),
T).
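% Added example, consistent with the session log below: under acceptance
% only agent a is pivotal for project x, so only a's tax entry is nonzero.
% ?- pivotal_tax([5,-3,0],2,accept,T).
% T = [-3, 0, 0].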
% payoff(+O,+I,+T,-V): V[K] is agent K's quasi-linear payoff, the true valuation for O minus the tax T[K].
%---------------------------
payoff(O,I,T,V):-
findall(VA,
(
nth1(K,I,A),
nth1(K,T,TA),
valuation(A,[O],UA),
VA is UA-TA
),
V).
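% Added example: quasi-linear payoffs for project x under the truthful
% reports, i.e., each agent's true valuation minus its tax entry.
% ?- payoff(x,[a,b,c],[-3,0,0],V).
% V = [8, -3, 0].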
%---------------------------
% find best response of agent
%---------------------------
:- dynamic nash_data/3.
find_best_response_of(A,O,PK,VK,PD):-
agents(I),
agent(A),
object(O),
nth1(K,I,A),
nth1(K,V,VK),
nth1(K,P,PK),
PD=pivotal_data(
[object:O,decision:_D,surplus:_SUM],
[agent:I,report:P,tax:_T,payoff:V]
),
(\+ clause(PD,_) ->go;true),
find_best_response_0([O,I,A,K,VK,P],PD).
find_best_response_0(X,PD):-
clause(nash_data(best,X,PD),_),
!.
find_best_response_0(X,PD):-
clause(nash_data(not_best,X,PD),_),
!,
fail.
find_best_response_0([O,I,A,K,VK,P],PD):-
X=[O,I,A,K,VK,P],
PD,
DD=pivotal_data(
[object:O,decision:_D1,surplus:_SUM1],
[agent:I,report:P1,tax:_T1,payoff:V1]
),
\+ (
DD,
forall(
nth1(J,P,PJ),
(nth1(J,P1,PJ);J=K)
),
nth1(K,V1,V1K),
V1K >VK,
assert(nash_data(not_best,X,PD)),
Y=[O,I,A,K,V1K,P1],
assert(nash_data(defeat(X),Y,DD))
),
assert(nash_data(best,X,PD)).
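% Added usage note (illustrative call, not part of the original program):
% enumerate, for agent a and project x, the recorded profiles in which a's
% report PK is a best response, with VK the resulting payoff of a.
% ?- find_best_response_of(a,x,PK,VK,PD).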
% (inefficient)
find_all_best_responses:-
(clause(nash_data(_,_,_),_)
->
(
write('Will you abolish all nash_data/3 ? (y/n) '),nl,
write('Note: recomputing may take several minutes. >'),
read(y)
);true
),
retractall(nash_data(_,_,_)), % keep the dynamic declaration of nash_data/3
forall(
find_best_response_of(_A,_O,_B,_C,_PD),
true
).
nash_equilibrium(PD):-
(\+ clause(nash_data(_,_,_),_)->find_all_best_responses;true),
PD=pivotal_data(
[object:O,decision:_D,surplus:_SUM],
[agent:I,report:P,tax:_T,payoff:_V]
),
PD,
X=[O,I,A,_K,_VK,P],
forall(member(A,I),
nash_data(best,X,PD)
).
/*
?- find_all_best_responses.
Will you abolish all nash_data/3 ? (y/n) >y.
end
Yes
?- tell_goal('pivotal_ne.pl',forall,nash_data(_,_,_)). % Save Data to File.
Yes
?- nash_equilibrium(A).
A = pivotal_data([object:x, decision:reject, surplus: -30], [agent:[a, b, c], report:[-10, -10, -10], tax:[0, 0, 0], payoff:[10, -5, 0]]) ;
A = pivotal_data([object:x, decision:reject, surplus: -28], [agent:[a, b, c], report:[-8, -10, -10], tax:[0, 0, 0], payoff:[10, -5, 0]]) ;
A = pivotal_data([object:x, decision:reject, surplus: -26], [agent:[a, b, c], report:[-6, -10, -10], tax:[0, 0, 0], payoff:[10, -5, 0]])
Yes
?- findall(1,pivotal_data(_,_),A),length(A,N).
A = [1, 1, 1, 1, 1, 1, 1, 1, 1|...]
N = 3993
Yes
?- findall(1,pivotal_data(_,_),C),sum(C,M).
C = [1, 1, 1, 1, 1, 1, 1, 1, 1|...]
M = 3993
Yes
?- findall(1,nash_data(best,_,_),C),sum(C,M).
C = [1, 1, 1, 1, 1, 1, 1, 1, 1|...]
M = 8379
Yes
?- findall(1,nash_data(_,_,_),A),length(A,N).
A = [1, 1, 1, 1, 1, 1, 1, 1, 1|...]
N = 11979
Yes
?- findall(1,nash_equilibrium(_),A),length(A,N).
A = [1, 1, 1, 1, 1, 1, 1, 1, 1|...]
N = 2286
Yes
?-
*/
% optimality of truthfulness
%---------------------------
truthful(X,PD,Z):-
X=[O,I,A,K,VK,P],
object(O),
agents(I),
valuation(A,[O],PK),
nth1(K,I,A),
nth1(K,P,PK),
nth1(K,V,VK),
PD=pivotal_data(
[object:O,decision:_D,surplus:_SUM],
[agent:I,report:P,tax:_T,payoff:V]
),
PD,
is_best_response([A,O,PK,VK,PD],Z1),
Z=Z1.
is_best_response([A,O,PK,VK,PD],best):-
find_best_response_of(A,O,PK,VK,PD),
!.
is_best_response(_,not_best).
truth_telling_profile(PD):-
PD=pivotal_data(
[object:O,decision:_D,surplus:_SUM],
[agent:I,report:TT,tax:_T,payoff:_V]
),
PD,
findall(ZK,
(
member(A,I),
valuation(A,[O],ZK)
),
TT).
/*
?- truth_telling_profile(A), nash_equilibrium(A).
A = pivotal_data([object:x, decision:accept, surplus:2],
[agent:[a, b, c], report:[5, -3, 0], tax:[-3, 0, 0], payoff:[8, -3, 0]]) ;
A = pivotal_data([object:y, decision:accept, surplus:3],
[agent:[a, b, c], report:[-2, 5, 0], tax:[0, -2, 0], payoff:[-2, 7, 0]]) ;
A = pivotal_data([object:z, decision:accept, surplus:4],
[agent:[a, b, c], report:[-1, 1, 4], tax:[0, 0, 0], payoff:[-1, 1, 4]]) ;
No
?- truthful(A,B,not_best).
No
*/
%---------------------------
% Common Programs
%---------------------------
% sum
%---------------------------
sum([],0).
sum([X|Members],Sum):-
sum(Members,Sum1),
Sum is Sum1 + X.
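% Added example:
% ?- sum([5,-3,0],S).
% S = 2.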
% maximal solution for given goal clause : a naive solver
%---------------------------------------------------------
max(X,Goal):-
% X: the objective variable,
% Goal: the objective function and constraints,
setof((X,Goal),Goal,Z),
member((X,Goal),Z),
\+ (
member((Y,_),Z),
Y > X
).
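% Added example: the largest true valuation of agent a over the projects
% listed above.
% ?- max(U,valuation(a,_,U)).
% U = 5.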
% ----------------------------------------------------------- %
% output to file.
% ----------------------------------------------------------- %
tell_test(Goal):-
open('tell.txt',write,S),
tell('tell.txt'),
Goal,
current_stream('tell.txt',write,S),
tell(user),write(end),nl,
close(S).
%
tell_goal(File,G):-
(current_stream(File,write,S0)->close(S0);true),
open(File,write,S),
tell(File),
nl,
tstamp('% file output start time ',_),
nl,
write('%---------- start from here ------------%'),
nl,
G,
nl,
write('%---------- end of data ------------%'),
nl,
tstamp('% file output end time ',_),
tell(user),
close(S),
% The following is to cope with the duplicated stream problem.
(current_stream(File,write,S1)->close(S1);true).
% saving all successful goals to a file.
%--------------------------------
tell_goal(File,forall,G):-
G0 = (nl,write(G),write('.')),
G1 = forall(G,G0),
tell_goal(File,G1).
% time stamp
% (convert_time/8 is the old SWI-Prolog 5.x interface; recent releases
%  provide stamp_date_time/3 instead)
%--------------------------------
tstamp(no,T):-
get_time(U),
convert_time(U,A,B,C,D,E,F,_G),
T = [date(A/B/C), time(D:E:F)],
nl.
tstamp(Word,T):-
\+ var(Word),
Word \= no,
get_time(U),
convert_time(U,A,B,C,D,E,F,_G),
T = [date(A/B/C), time(D:E:F)],
% format('~`.t~t~a~30|~`.t~t~70|~n',[Word]),
write((Word,T)),
nl.
:- title.
% end