diff --git a/.travis.yml b/.travis.yml
index 5aadcdc..7ec51f2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,7 +15,7 @@ install:
   - pip install Flask==$FLASK
 
 before_script:
-  - python tools/create_db.py --force -m all
+  - python scripts/create_db.py --force -m all
   - mkdir filecache
   - chmod -R 0777 filecache 
   - python scrape.py -g 30649
diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md
index 9c5fec2..9e0c9e4 100644
--- a/DEPLOYMENT.md
+++ b/DEPLOYMENT.md
@@ -6,7 +6,7 @@ you are deploying)
 To deploy a server/database for a new municipality, follow these steps:
   1. Make sure the GeoJSON map file with the name of the municipality has 
      been added to the [map repository](http://github.com/niryariv/israel_gushim)
-  2. Run `fab create_server:holon,"חולון"`. This will add the new gush ids to the tools/gushim.py file, create & configure the new Heroku app / MongoDB, and finally run the scraper to get all municipality's plans. 
+  2. Run `fab create_server:holon,"חולון"`. This will add the new gush ids to the lib/gushim.py file, create & configure the new Heroku app / MongoDB, and finally run the scraper to get all of the municipality's plans.
   3. When the task finishes running, a browser window (or tab) will be open with 
      the new app's scheduler dashboard. Add a new scheduled task with the 
      command: `python scrape.py -g all ; python worker.py`. Do not change dyno settings.
@@ -23,6 +23,21 @@ To deploy a new municipality, run: `fab create_client:holon,"חולון"` after
 To change client configuration, you can edit `munis.js` manually later on, according to the [Municipality 
      Index File syntax](http://github.com/niryariv/opentaba-client/blob/master/DEPLOYMENT.md#municipality-index-file).
 
+##Automatic Facebook and Twitter Posting
+The server can post a plan's content to a Facebook page and a Twitter feed every time a plan is created or updated, using a running instance of [opentaba-poster](https://github.com/florpor/opentaba-poster).
+To enable this feature, a couple of environment variables need to be set on the server, while the access tokens, consumer keys etc. are configured on the opentaba-poster instance itself.
+You can enable Facebook only, Twitter only, or both.
+
+###Environment Variables
+####Poster
+To enable social posting, the server must be configured to work with an instance of [opentaba-poster](https://github.com/florpor/opentaba-poster).
+To do that, make sure the server is defined as a poster on the opentaba-poster app, and then set two environment variables -
+`POSTER_SERVICE_URL` must be set to the URL of the opentaba-poster app, and `POSTER_ID` must be set to the assigned id, e.g.:
+```
+heroku config:set POSTER_SERVICE_URL="http://poster.service.com/" --app opentaba-server-holon
+heroku config:set POSTER_ID="holon_id" --app opentaba-server-holon
+```
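+
+With both variables set, the server will attempt to post every new or updated plan through the poster service (see `lib/sociallib.py`). You can double-check the values with `heroku config --app opentaba-server-holon`.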
+
 ##All Fabric Tasks
 ###Server
 + `fab create_server:muni_name, "display_name"`
@@ -37,9 +52,9 @@ To change client configuration, you can edit `munis.js` manually later on, accor
   ignore_errors is set to false by default because if this task fails it most
   likely means the app does not exist to begin with.
 
-+ `fab update_gushim_server:muni_name` Update the [tools/gushim.py](tools/gushim.py) file with the
++ `fab update_gushim_server:muni_name` Update the [lib/gushim.py](lib/gushim.py) file with the
   gushim of a new municipality or the updated ones of an existing municipality.
-  This task downloads the gush map file from [israel_gushim](http://github.com/niryariv/israel_gushim), parses its  data, and if there are new gushim it updates the [tools/gushim.py](tools/gushim.py) file and the 
+  This task downloads the gush map file from [israel_gushim](http://github.com/niryariv/israel_gushim), parses its data, and if there are new gushim it updates the [lib/gushim.py](lib/gushim.py) file and the
   [Tests/functional_tests/test_return_json.py](Tests/functional_tests/test_return_json.py) file (with the new amount of gushim), commits and pushes on the master branch. Note that this task does not deploy
   anywhere, and the new gushim data will not exist on active servers until you
   deploy changes to them.
@@ -50,10 +65,10 @@ To change client configuration, you can edit `munis.js` manually later on, accor
 + `fab deploy_server_all` Find servers by looking at your `heroku list` and filtering
   out the ones that don't match our server name pattern. Run deploy_server task
   on each of the discovered servers.
-+ `fab create_db:muni_name` Run the [tools/create_db.py](tools/create_db.py) script on the given
++ `fab create_db:muni_name` Run the [scripts/create_db.py](scripts/create_db.py) script on the given
   municipality's heroku app. Will only create db for the given municipality's
   gushim.
-+ `fab update_db:muni_name` Run the [tools/update_db.py](tools/update_db.py) script on the given
++ `fab update_db:muni_name` Run the [scripts/update_db.py](scripts/update_db.py) script on the given
   municipality's heroku app. Will only update db for the given municipality's
   gushim.
 + `fab scrape:muni_name,<show_output=False|True>` Run the [scrape.py](scrape.py) script on the
@@ -66,6 +81,9 @@ To change client configuration, you can edit `munis.js` manually later on, accor
 + `fab refresh_db:muni_name` Update the DB with new gushim via update_db and run scrape tasks
 + `fab refresh_db_all` Find servers by looking at your `heroku list` and filtering
   by our naming pattern. Run the refresh_db task on each one discovered.
++ `fab sync_poster:muni_name,min_date` Run the [scripts/sync_poster.py](scripts/sync_poster.py) script on the given
+  municipality's heroku app. min_date is the minimum date of plans to post, 
+  and should be of the format: 1/1/2015.
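+  For example, `fab sync_poster:holon,1/1/2015` will send all of holon's plans from
+  January 1st, 2015 onward to the configured poster service.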
 
 ###Client
 + `fab create_client:muni_name,"display_name"` For client creation, all we need
diff --git a/Tests/functional_tests/test_return_json.py b/Tests/functional_tests/test_return_json.py
index 1f07c34..6d69bcc 100644
--- a/Tests/functional_tests/test_return_json.py
+++ b/Tests/functional_tests/test_return_json.py
@@ -97,7 +97,7 @@ def test_api_get_plan():
     eq_(response.mimetype, 'application/json')
 
     # I don't know the correct number, since it changes with each update, but it should be more than this
-    assert_true(len(j) >= 19)
+    assert_true(len(j) >= 17)
 
 
 def test_api_wakeup():
diff --git a/Tests/unit_test/test_scrape.py b/Tests/unit_test/test_scrape.py
index 6c77d06..919b434 100644
--- a/Tests/unit_test/test_scrape.py
+++ b/Tests/unit_test/test_scrape.py
@@ -3,7 +3,7 @@
 from app import app
 from nose.tools import eq_, assert_true
 from nose import with_setup
-from tools.scrapelib import scrape_gush
+from lib.scrapelib import scrape_gush
 import os
 
 testapp = app.test_client()
diff --git a/app.py b/app.py
index fdd7bbf..73fa6fe 100644
--- a/app.py
+++ b/app.py
@@ -12,79 +12,14 @@
 from flask import Flask
 from flask import abort, make_response, request
 
-from tools.conn import *
-from tools.gushim import GUSHIM
-from tools.cache import cached, _setup_cache
+from lib.conn import *
+from lib.cache import cached, _setup_cache
+import lib.helpers as helpers
 
 app = Flask(__name__)
 app.debug = RUNNING_LOCAL # if we're local, keep debug on
 
 
-#### Helpers ####
-
-def _get_plans(count=1000, query={}):
-    return list(db.plans.find(query, limit=count).sort(
-        [("year", pymongo.DESCENDING), ("month", pymongo.DESCENDING), ("day", pymongo.DESCENDING)]))
-
-
-def _get_gushim(query={}, fields=None):
-    return list(db.gushim.find(query, fields=fields))
-
-
-def _create_response_json(data):
-    """
-    Convert dictionary to JSON. json_util.default adds automatic mongoDB result support
-    """
-    r = make_response(json.dumps(data, ensure_ascii=False, default=json_util.default))
-    r.headers['Access-Control-Allow-Origin'] = "*"
-    r.headers['Content-Type'] = "application/json; charset=utf-8"
-    return r
-
-
-def _create_response_atom_feed(request, plans, feed_title=''):
-    """
-    Create an atom feed of plans fetched from the DB based on an optional query
-    """
-    feed = AtomFeed(feed_title, feed_url=request.url, url=request.url_root)
-
-    for p in plans:
-        url = p['details_link']
-        
-        # special emphasizing for some statuses
-        if p['status'] in [u'פרסום ההפקדה', u'פרסום בעיתונות להפקדה']:
-            status = u'»»%s««' % p['status']
-        else:
-            status = p['status']
-        
-        content = p['essence'] + ' [' + status + ', ' + '%02d/%02d/%04d' % (p['day'], p['month'], p['year']) + \
-            ', ' + p['number'] + ']'
-        title = p['location_string']
-        # 'not title' is not supposed to happen anymore because every plan currently has a location
-        if not title:
-            title = p['number']
-        
-        if p['mavat_code'] == '':
-            links = [{'href' : 'http://www.mavat.moin.gov.il/MavatPS/Forms/SV3.aspx?tid=4&tnumb=' + p['number'], 'rel': 'related', 'title': u'מבא"ת'}]
-        else:
-            links = [{'href': '%splan/%s/mavat' % (request.url_root, p['plan_id']), 'rel': 'related', 'title': u'מבא"ת'}]
-
-        feed.add(
-            title=title,
-            content=content,
-            content_type='html',
-            author="OpenTABA.info",
-            # id=url + '&status=' + p['status'], 
-            # ^^ it seems like the &tblView= value keeps changing in the URL, which causes the ID to change and dlvr.it to republish items.
-            id="%s-%s" % (title, p['status']),
-            # this is a unique ID (not real URL) so adding status to ensure uniqueness in TBA stages
-            url=url,
-            links=links,
-            updated=datetime.date(p['year'], p['month'], p['day'])
-        )
-
-    return feed
-
-
 #### Cache Helper ####
 
 @app.before_first_request
@@ -105,18 +40,14 @@ def get_gushim():
     get gush_id metadata
     """
     detailed = request.args.get('detailed', '') == 'true'
-    gushim = _get_gushim(fields={'gush_id': True, 'last_checked_at': True, '_id': False})
+    gushim = helpers._get_gushim(fields={'gush_id': True, 'last_checked_at': True, '_id': False})
     if detailed:
         # Flatten list of gushim into a dict
         g_flat = dict((g['gush_id'], {"gush_id": g['gush_id'],
                                       "last_checked_at": g['last_checked_at'],
                                       "plan_stats": {}}) for g in gushim)
         # Get plan statistics from DB
-        stats = db.plans.aggregate([
-            {"$unwind" : "$gushim" },
-            {"$project": {"gush_id": "$gushim", "status": "$status", "_id": 0}},
-            {"$group": {"_id": {"gush_id": "$gush_id", "status": "$status"}, "count": {"$sum": 1}}}
-        ])
+        stats = helpers._get_plan_statistics()
 
         # Merge stats into gushim dict
         for g in stats['result']:
@@ -132,7 +63,7 @@ def get_gushim():
         # De-flatten our dict
         gushim = g_flat.values()
 
-    return _create_response_json(gushim)
+    return helpers._create_response_json(gushim)
 
 
 @app.route('/gush/<gush_id>.json')
@@ -141,10 +72,10 @@ def get_gush(gush_id):
     """
     get gush_id metadata
     """
-    gush = _get_gushim(query={"gush_id": gush_id})
+    gush = helpers._get_gushim(query={"gush_id": gush_id})
     if gush is None or len(gush) == 0:
         abort(404)
-    return _create_response_json(gush[0])
+    return helpers._create_response_json(gush[0])
 
 
 @app.route('/gush/<gushim>/plans.json')
@@ -160,7 +91,7 @@ def get_plans(gushim):
     else:
         gushim_query = {'gushim': gushim[0]}
 
-    return _create_response_json(_get_plans(query=gushim_query))
+    return helpers._create_response_json(helpers._get_plans(query=gushim_query))
 
 
 @app.route('/recent.json')
@@ -169,7 +100,7 @@ def get_recent_plans():
     """
     Get the 10 most recent plans to show on the site's home page
     """
-    return _create_response_json(_get_plans(count=10))
+    return helpers._create_response_json(helpers._get_plans(count=10))
 
 
 @app.route('/plans.atom')
@@ -180,7 +111,7 @@ def atom_feed():
     else:
         title = u'תב"ע פתוחה'
     
-    return _create_response_atom_feed(request, _get_plans(count=20), feed_title=title).get_response()
+    return helpers._create_response_atom_feed(request, helpers._get_plans(count=20), feed_title=title).get_response()
 
 
 @app.route('/gush/<gushim>/plans.atom')
@@ -196,7 +127,7 @@ def atom_feed_gush(gushim):
     else:
         gushim_query = {'gushim': gushim[0]}
     
-    return _create_response_atom_feed(request, _get_plans(query=gushim_query), feed_title=u'תב״ע פתוחה - גוש %s' % ', '.join(gushim)).get_response()
+    return helpers._create_response_atom_feed(request, helpers._get_plans(query=gushim_query), feed_title=u'תב״ע פתוחה - גוש %s' % ', '.join(gushim)).get_response()
 
 
 @app.route('/plans/search/<path:plan_name>')
@@ -205,7 +136,7 @@ def find_plan(plan_name):
     """
     Find plans that contain the search query and return a json array of their plan and gush ids
     """
-    return _create_response_json(_get_plans(count=3, query={'number': {'$regex': '.*%s.*' % plan_name}}))
+    return helpers._create_response_json(helpers._get_plans(count=3, query={'number': {'$regex': '.*%s.*' % plan_name}}))
 
 
 @app.route('/plan/<plan_id>/mavat')
@@ -246,7 +177,7 @@ def wakeup():
     wake up Heroku dyno from idle. perhaps can if >1 dynos
     used as endpoint for a "wakeup" request when the client inits
     """
-    return _create_response_json({'morning': 'good'})
+    return helpers._create_response_json({'morning': 'good'})
 
 
 #### MAIN ####
diff --git a/fabfile.py b/fabfile.py
index d7f73ad..8158a47 100644
--- a/fabfile.py
+++ b/fabfile.py
@@ -9,7 +9,7 @@
 from scripts.client_fabfile import create_client
 
 from scripts.server_fabfile import create_server, delete_server, update_gushim_server, deploy_server, deploy_server_all, create_db
-from scripts.server_fabfile import update_db, scrape, renew_db, renew_db_all, refresh_db, refresh_db_all
+from scripts.server_fabfile import update_db, scrape, renew_db, renew_db_all, refresh_db, refresh_db_all, sync_poster
 
 
 @task
diff --git a/tools/__init__.py b/lib/__init__.py
similarity index 100%
rename from tools/__init__.py
rename to lib/__init__.py
diff --git a/tools/cache.py b/lib/cache.py
similarity index 100%
rename from tools/cache.py
rename to lib/cache.py
diff --git a/tools/conn.py b/lib/conn.py
similarity index 100%
rename from tools/conn.py
rename to lib/conn.py
diff --git a/tools/gushim.py b/lib/gushim.py
similarity index 100%
rename from tools/gushim.py
rename to lib/gushim.py
diff --git a/lib/helpers.py b/lib/helpers.py
new file mode 100644
index 0000000..13c8cb2
--- /dev/null
+++ b/lib/helpers.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+
+"""
+Helpers for our web and worker (scraper) instances
+"""
+
+from werkzeug.contrib.atom import AtomFeed
+from flask import make_response
+import json
+from bson import json_util
+import datetime
+import pymongo
+
+from conn import db
+
+
+def _get_plans(count=1000, query={}):
+    return list(db.plans.find(query, limit=count).sort(
+        [("year", pymongo.DESCENDING), ("month", pymongo.DESCENDING), ("day", pymongo.DESCENDING)]))
+
+
+def _get_gushim(query={}, fields=None):
+    return list(db.gushim.find(query, fields=fields))
+
+
+def _get_plan_statistics():
+    return db.plans.aggregate([
+            {"$unwind" : "$gushim" },
+            {"$project": {"gush_id": "$gushim", "status": "$status", "_id": 0}},
+            {"$group": {"_id": {"gush_id": "$gush_id", "status": "$status"}, "count": {"$sum": 1}}}
+        ])
+
+
+def _create_response_json(data):
+    """
+    Convert dictionary to JSON. json_util.default adds automatic mongoDB result support
+    """
+    r = make_response(json.dumps(data, ensure_ascii=False, default=json_util.default))
+    r.headers['Access-Control-Allow-Origin'] = "*"
+    r.headers['Content-Type'] = "application/json; charset=utf-8"
+    return r
+
+
+def _create_response_atom_feed(request, plans, feed_title=''):
+    """
+    Create an atom feed of plans fetched from the DB based on an optional query
+    """
+    feed = AtomFeed(feed_title, feed_url=request.url, url=request.url_root)
+
+    for p in plans:
+        formatted = _format_plan(p, request.url_root)
+
+        feed.add(
+            title=formatted['title'],
+            content=formatted['content'],
+            content_type='html',
+            author="OpenTABA.info",
+            # id=url + '&status=' + p['status'], 
+            # ^^ it seems like the &tblView= value keeps changing in the URL, which causes the ID to change and dlvr.it to republish items.
+            id="%s-%s" % (formatted['title'], p['status']),
+            # this is a unique ID (not real URL) so adding status to ensure uniqueness in TBA stages
+            url=formatted['url'],
+            links=formatted['links'],
+            updated=formatted['last_update']
+        )
+
+    return feed
+
+
+def _format_plan(plan, server_root=None):
+    """
+    Take a plan and format it for atom feed and social networks
+    """
+    formatted_plan = {}
+    
+    formatted_plan['url'] = plan['details_link']
+        
+    # special emphasizing for some statuses
+    if plan['status'] in [u'פרסום ההפקדה', u'פרסום בעיתונות להפקדה']:
+        formatted_plan['status'] = u'»»%s««' % plan['status']
+    else:
+        formatted_plan['status'] = plan['status']
+    
+    # the plan's content
+    formatted_plan['content'] = plan['essence'] + ' [' + formatted_plan['status'] + ', ' + \
+        '%02d/%02d/%04d' % (plan['day'], plan['month'], plan['year']) + ', ' + plan['number'] + ']'
+    
+    # the title
+    formatted_plan['title'] = plan['location_string']
+    # 'not title' is not supposed to happen anymore because every plan currently has a location
+    if not formatted_plan['title']:
+        formatted_plan['title'] = plan['number']
+    
+    # mavat link - if we have a code and the base url for this server (currently only from the atom feed) we can give a direct link
+    # (through our server). otherwise link to the search page with parameters
+    if plan['mavat_code'] == '' or server_root is None:
+        formatted_plan['links'] = [{'href' : 'http://www.mavat.moin.gov.il/MavatPS/Forms/SV3.aspx?tid=4&tnumb=' + plan['number'], 'rel': 'related', 'title': u'מבא"ת'}]
+    else:
+        formatted_plan['links'] = [{'href': '%splan/%s/mavat' % (server_root, plan['plan_id']), 'rel': 'related', 'title': u'מבא"ת'}]
+    
+    # plan last update
+    formatted_plan['last_update'] = datetime.date(plan['year'], plan['month'], plan['day'])
+    
+    return formatted_plan
+
+
+"""
+A small class to enable json-serializing of datetime.date objects
+To use it: json.dumps(json_object, cls=helpers.DateTimeEncoder)
+"""
+class DateTimeEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if hasattr(obj, 'isoformat'):
+            return obj.isoformat()
+        else:
+            return json.JSONEncoder.default(self, obj)
diff --git a/tools/mavat_scrape.py b/lib/mavat_scrape.py
similarity index 100%
rename from tools/mavat_scrape.py
rename to lib/mavat_scrape.py
diff --git a/tools/mmi_scrape.py b/lib/mmi_scrape.py
similarity index 100%
rename from tools/mmi_scrape.py
rename to lib/mmi_scrape.py
diff --git a/tools/scrapelib.py b/lib/scrapelib.py
similarity index 96%
rename from tools/scrapelib.py
rename to lib/scrapelib.py
index 59d2c4c..79c450d 100644
--- a/tools/scrapelib.py
+++ b/lib/scrapelib.py
@@ -6,13 +6,15 @@
 import logging
 import json
 import datetime
+import os
 from hashlib import md5
 from copy import deepcopy
 from multiprocessing.pool import ThreadPool
 
-from conn import *
+from conn import db, RUNNING_LOCAL
 from mmi_scrape import get_mmi_gush_json
 from mavat_scrape import get_mavat_gush_json
+import sociallib
 
 date_pattern = re.compile(r'(\d+/\d+/\d+)')
 mmi_bad_plan_number_no_slash_pattern = re.compile(ur'^(.*[0-9]+)([א-ת])$')
@@ -234,6 +236,9 @@ def scrape_gush(gush, RUN_FOLDER=False, TESTING=False):
             plan['gushim'] = [ gush_id ]
             log.debug("Inserting new plan data: %s", plan)
             db.plans.insert(plan)
+            
+            # post plan to social networks
+            sociallib.post(plan)
         else:
             # since the plan exists get its _id and gushim values
             plan['_id'] = existing_plan['_id']
@@ -245,6 +250,9 @@ def scrape_gush(gush, RUN_FOLDER=False, TESTING=False):
                 # since we are sending an _id value the document will be updated
                 log.debug("Updating modified plan data: %s", plan)
                 db.plans.save(plan)
+                
+                # post plan to social networks
+                sociallib.post(plan)
             else:
                 # compare the values. maybe the plan wasn't modified at all
                 plan_copy = deepcopy(plan)
@@ -255,6 +263,9 @@ def scrape_gush(gush, RUN_FOLDER=False, TESTING=False):
                     # since we are sending an _id value the document will be updated
                     log.debug("Updating modified plan data: %s", plan)
                     db.plans.save(plan)
+                    
+                    # post plan to social networks
+                    sociallib.post(plan)
             
                 # just make sure these are deleted because we will probably have quite a few iterations here
                 del plan_copy
diff --git a/lib/sociallib.py b/lib/sociallib.py
new file mode 100644
index 0000000..d2d12e8
--- /dev/null
+++ b/lib/sociallib.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+import requests
+import logging
+import os
+from json import dumps
+
+import helpers as helpers
+
+log = logging.getLogger(__name__)
+
+
+def post(plan):
+    if all(param in os.environ.keys() for param in ['POSTER_SERVICE_URL', 'POSTER_ID']):
+        # generate a formatted plan and the post data
+        formatted_plan = helpers._format_plan(plan)
+        post_data = {'poster_id': os.environ['POSTER_ID'], 'title': formatted_plan['title'], 'content': formatted_plan['content'], 'url': formatted_plan['url']}
+        
+        # send data to social poster service. we just get an ok and continue, it's up to the service to take care of errors and such
+        requests.post('%s/post' % os.environ['POSTER_SERVICE_URL'].rstrip('/'), data=post_data)
diff --git a/readme.md b/readme.md
index 2fabcbd..175b9c0 100644
--- a/readme.md
+++ b/readme.md
@@ -16,7 +16,7 @@ The code is Flask based, working with MongoDB as database, Uses redis to handle
 Notice that if you are running this on a local dev machine you need to have MongoDB running and listening on port 27017
 #### Create initial DB
 
-    python create_db --force -m [all | <muni>]
+    python scripts/create_db.py --force -m [all | <muni>]
 
 #### Scrape data into DB
 
diff --git a/scrape.py b/scrape.py
index 2b5d4fd..06528f0 100644
--- a/scrape.py
+++ b/scrape.py
@@ -7,8 +7,8 @@
 from optparse import OptionParser, SUPPRESS_HELP
 from rq import Queue
 from app import app
-from tools.conn import *
-from tools.scrapelib import scrape_gush
+from lib.conn import *
+from lib.scrapelib import scrape_gush
 from worker import redis_conn
 
 
diff --git a/tools/create_db.py b/scripts/create_db.py
similarity index 92%
rename from tools/create_db.py
rename to scripts/create_db.py
index b5a8b34..73c1118 100644
--- a/tools/create_db.py
+++ b/scripts/create_db.py
@@ -1,7 +1,12 @@
 #!/usr/bin/python
 
-from conn import *
-from gushim import GUSHIM
+# allow ourselves to import from the parent and current directory
+import sys
+sys.path.insert(0, '../')
+sys.path.insert(0, '.')
+
+from lib.conn import *
+from lib.gushim import GUSHIM
 from optparse import OptionParser
 
 parser = OptionParser()
diff --git a/scripts/server_fabfile.py b/scripts/server_fabfile.py
index 368348d..6f6eea3 100644
--- a/scripts/server_fabfile.py
+++ b/scripts/server_fabfile.py
@@ -102,15 +102,15 @@ def update_gushim_server(muni_name):
     # make sure we're using the master branch
     local('git checkout master')
     
-    # open and load the existing gushim dictionary from tools/gushim.py
-    with open(os.path.join('tools', 'gushim.py')) as gushim_data:
+    # open and load the existing gushim dictionary from lib/gushim.py
+    with open(os.path.join('lib', 'gushim.py')) as gushim_data:
         existing_gushim = loads(gushim_data.read().replace('GUSHIM = ', ''))
     
     # if the municipality already exists replace its list, otherwise create a new one
     existing_gushim[muni_name] = {'list': gush_ids}
     
-    # write the dictionary back to tools/gushim.py
-    out = open(os.path.join('tools', 'gushim.py'), 'w')
+    # write the dictionary back to lib/gushim.py
+    out = open(os.path.join('lib', 'gushim.py'), 'w')
     out.write('GUSHIM = ' + dumps(existing_gushim, sort_keys=True, indent=4, separators=(',', ': ')))
     out.flush()
     os.fsync(out.fileno())
@@ -135,13 +135,13 @@ def update_gushim_server(muni_name):
     out.close()
     
     # commit and push to origin
-    local('git add %s' % os.path.join('tools', 'gushim.py'))
+    local('git add %s' % os.path.join('lib', 'gushim.py'))
     local('git add %s' % os.path.join('Tests', 'functional_tests', 'test_return_json.py'))
     local('git commit -m "added gushim and updated tests for %s"' % muni_name)
     local('git push origin master')
     
     print '*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*'
-    print 'The new/updated gushim data was added to tools/gushim.py and the test file '
+    print 'The new/updated gushim data was added to lib/gushim.py and the test file '
     print 'Tests/functional_tests/test_return_json.py was updated.'
     print 'Both files were successfully committed and pushed to origin.'
     print '*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*X*'
@@ -170,7 +170,7 @@ def create_db(muni_name):
     
     _heroku_connect()
     
-    local('heroku run "python tools/create_db.py --force -m %s" --app %s' % (muni_name, _get_server_full_name(muni_name)))
+    local('heroku run "python scripts/create_db.py --force -m %s" --app %s' % (muni_name, _get_server_full_name(muni_name)))
 
 
 @task
@@ -179,7 +179,7 @@ def update_db(muni_name):
     
     _heroku_connect()
     
-    local('heroku run "python tools/update_db.py --force -m %s" --app %s' % (muni_name, _get_server_full_name(muni_name)))
+    local('heroku run "python scripts/update_db.py --force -m %s" --app %s' % (muni_name, _get_server_full_name(muni_name)))
 
 
 @task
@@ -225,3 +225,11 @@ def refresh_db_all():
     
     for server in _get_servers():
         refresh_db(_get_muni_name(server))
+
+@task
+def sync_poster(muni_name, min_date):
+    """Run the sync_poster script file on a certain heroku app"""
+    
+    _heroku_connect()
+    
+    local('heroku run "python scripts/sync_poster.py -m %s -q" --app %s' % (min_date, _get_server_full_name(muni_name)))
diff --git a/scripts/sync_poster.py b/scripts/sync_poster.py
new file mode 100644
index 0000000..957c0c4
--- /dev/null
+++ b/scripts/sync_poster.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+
+# allow ourselves to import from the parent and current directory
+import sys
+sys.path.insert(0, '../')
+sys.path.insert(0, '.')
+
+import os
+import datetime
+from optparse import OptionParser
+from time import sleep
+import requests
+
+from lib.conn import *
+import lib.helpers as helpers
+from lib.sociallib import post
+
+# can't communicate with poster service without these
+if not all(param in os.environ.keys() for param in ['POSTER_SERVICE_URL', 'POSTER_ID']):
+    print 'Environment variables POSTER_SERVICE_URL and POSTER_ID must both be set!'
+    exit(1)
+
+parser = OptionParser()
+parser.add_option('-m', dest='min_date', help='minimum date for plans to be sent to poster service. if not supplied, ALL plans will be sent. format: 1/1/2015')
+parser.add_option('-q', dest='quiet', default=False, action='store_true', help='quiet, don\'t prompt for user approval')
+parser.add_option('-d', dest='dont_wait', default=False, action='store_true', help='don\'t wait for poster service to post everything')
+
+(options, args) = parser.parse_args()
+
+if options.min_date:
+    # make sure the min_date parses fine
+    try:
+        min_date = datetime.datetime.strptime(options.min_date, '%d/%m/%Y')
+    except:
+        print 'Invalid minimum date. Format is 1/1/2015'
+        exit(1)
+    
+    # build min_date query
+    plans_query = {'$or': [ {'year': {'$gt': min_date.year}}, {'year': min_date.year, 'month': {'$gt': min_date.month}}, {'year': min_date.year, 'month': min_date.month, 'day': {'$gte': min_date.day}} ]}
+else:
+    # no query
+    plans_query = {}
+
+# get valid plans
+plans = helpers._get_plans(query=plans_query)
+
+# if not quiet, make sure the user is ok with this
+if not options.quiet:
+    while 1:
+        if not options.min_date:
+            sys.stdout.write('No minimum date was supplied.\nAre you sure you want ALL %s plans to be synced? [y/N] ' % len(plans))
+        else:
+            sys.stdout.write('Are you sure you want %s plans to be synced? [y/N] ' % len(plans))
+        
+        choice = raw_input().lower()
+        if choice == 'n' or choice == 'no':
+            exit()
+        elif choice == 'y' or choice == 'yes':
+            break
+
+print 'Posting plans... (may take up to a few minutes, depending on how many are sent)'
+
+# reverse the list so that we send the service the earlier plans first (service's queue is fifo)
+for plan in reversed(plans):
+    post(plan)
+
+if not options.dont_wait:
+    status = 10
+    
+    while status > 1:
+        # wait for 15 seconds then poke
+        sleep(15)
+        
+        print 'Poking poster service for status...'
+        
+        # get the /status page and parse the number in the output
+        r = requests.get('%s/status' % os.environ['POSTER_SERVICE_URL'].rstrip('/'))
+        for s in r.text.split():
+            if s.isdigit():
+                status = int(s)
+        
+        print 'Approximately %s posts remaining...' % status
+    
+    print 'Poster done!'
diff --git a/tools/update_db.py b/scripts/update_db.py
similarity index 89%
rename from tools/update_db.py
rename to scripts/update_db.py
index d7f6040..62e5300 100644
--- a/tools/update_db.py
+++ b/scripts/update_db.py
@@ -1,7 +1,12 @@
 #!/usr/bin/env python2
 
-from conn import *
-from gushim import GUSHIM
+# allow ourselves to import from the parent and current directory
+import sys
+sys.path.insert(0, '../')
+sys.path.insert(0, '.')
+
+from lib.conn import *
+from lib.gushim import GUSHIM
 from optparse import OptionParser
 
 parser = OptionParser()
diff --git a/worker.py b/worker.py
index fa7d2dd..3b612dd 100644
--- a/worker.py
+++ b/worker.py
@@ -5,12 +5,15 @@
 import os
 import redis
 from rq import Worker, Queue, Connection
+import logging
 
 listen = ['high', 'default', 'low']
 redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
 redis_conn = redis.from_url(redis_url)
 
 if __name__ == '__main__':
+    logging.basicConfig(format='%(asctime)-15s %(name)s %(levelname)s %(message)s', level=logging.WARNING)
+    
     with Connection(redis_conn):
         worker = Worker(map(Queue, listen))
         worker.work(burst=True)