Initialize Mk2

This commit is contained in:
Linus2punkt0
2024-01-19 23:33:39 +01:00
parent 480d759b37
commit 7fdfbbd03e
75 changed files with 3357 additions and 629 deletions

4
.gitignore vendored
View File

@@ -1,5 +1,7 @@
__pycache__
database.json
database.bak
logs/*.log*
backups/*.bak*
.env
dbhost/
*.jpg

0
backups/.gitkeep Normal file
View File

View File

@@ -1,553 +1,21 @@
from atproto import Client
import tweepy
from mastodon import Mastodon
from datetime import datetime, timedelta
from auth import *
from paths import *
import settings
import json, os, urllib.request, random, string, shutil, re
from settings.auth import *
from settings.paths import *
from local.functions import write_log, cleanup, post_cache_read, post_cache_write, get_post_time_limit
from local.db import db_read, db_backup, save_db
from input.bluesky import get_posts
from output.post import post
date_in_format = '%Y-%m-%dT%H:%M:%S'
# Setting up connections to bluesky, twitter and mastodon
# The Bluesky login runs unconditionally at import time; the client is used
# by getPosts() and the helper functions below.
bsky = Client()
bsky.login(bsky_handle, bsky_password)
# After changes in twitters API we need to use tweepy.Client to make posts as it uses version 2.0 of the API.
# However, uploading images is still not included in 2.0, so for that we need to use tweepy.API, which uses
# the previous version.
if settings.Twitter:
    twitter = tweepy.Client(consumer_key=TWITTER_APP_KEY,
                            consumer_secret=TWITTER_APP_SECRET,
                            access_token=TWITTER_ACCESS_TOKEN,
                            access_token_secret=TWITTER_ACCESS_TOKEN_SECRET)
    # OAuth1 handler for the v1.1 API, used only for media uploads.
    tweepy_auth = tweepy.OAuth1UserHandler(TWITTER_APP_KEY, TWITTER_APP_SECRET, TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
    twitter_images = tweepy.API(tweepy_auth)
if settings.Mastodon:
    mastodon = Mastodon(
        access_token = MASTODON_TOKEN,
        api_base_url = MASTODON_INSTANCE
    )
# Getting posts from bluesky
def getPosts():
    """Fetch the user's recent Bluesky feed and collect crosspostable posts.

    Returns a dict keyed by post cid; each value holds the post text, the cid
    of the post it replies to or quotes (empty for standalone posts), image
    descriptors, the post type ("post"/"reply"/"quote") and its languages.
    """
    writeLog("Gathering posts")
    posts = {}
    # Getting feed of user
    profile_feed = bsky.app.bsky.feed.get_author_feed({'actor': bsky_handle})
    for feed_view in profile_feed.feed:
        # Posts in the feed authored by someone else are reposts; skip them.
        if feed_view.post.author.handle != bsky_handle:
            continue
        # Post type "post" means it is not a quote post.
        postType = "post"
        # If post has an embed of type record it is a quote post, and should not be crossposted
        cid = feed_view.post.cid
        text = feed_view.post.record.text
        # Sometimes bluesky shortens URLs and in that case we need to restore them before crossposting
        if feed_view.post.record.facets:
            text = restoreUrls(feed_view.post.record)
        langs = feed_view.post.record.langs
        # NOTE(review): the +2h offset looks like a hard-coded local-timezone
        # adjustment of the UTC indexed_at value — confirm against deployment TZ.
        timestamp = datetime.strptime(feed_view.post.indexed_at.split(".")[0], date_in_format) + timedelta(hours = 2)
        # Setting replyToUser to the same as user handle and only changing it if the tweet is an actual reply.
        # This way we can just check if the variable is the same as the user handle later and send through
        # both tweets that are not replies, and posts that are part of a thread.
        replyToUser = bsky_handle
        replyTo = ""
        # Checking if post is a quote post. Posts with references to feeds look like quote posts but aren't, and so will fail on missing attribute.
        # Since quote posts can give values in two different ways it's a bit of a hassle to double check if it is an actual quote post,
        # so instead I just try to run the function and if it fails I skip the post
        # If there is some reason you would want to crosspost a post referencing a bluesky-feed that I'm not seeing, I might update this in the future.
        if feed_view.post.embed and hasattr(feed_view.post.embed, "record"):
            try:
                replyToUser, replyTo = getQuotePost(feed_view.post.embed.record)
                postType = "quote"
            except:
                writeLog("Post is of a type the crossposter can't parse.")
                continue
        # Checking if post is regular reply
        elif feed_view.post.record.reply:
            postType = "reply"
            replyTo = feed_view.post.record.reply.parent.cid
            # Poster will try to fetch reply to-username the "ordinary" way,
            # and if it fails, it will try getting the entire thread and
            # finding it that way
            try:
                replyToUser = feed_view.reply.parent.author.handle
            except:
                replyToUser = getReplyToUser(feed_view.post.record.reply.parent)
        # If unable to fetch user that was replied to, code will skip this post.
        if not replyToUser:
            writeLog("Unable to find the user that this post replies to or quotes")
            continue
        # Checking if post is by user (i.e. not a repost), within timelimit and either not a reply or a reply in a thread.
        if timestamp > datetime.now() - timedelta(hours = settings.postTimeLimit) and replyToUser == bsky_handle:
            # Fetching images if there are any in the post
            imageData = ""
            images = []
            if feed_view.post.embed and hasattr(feed_view.post.embed, "images"):
                imageData = feed_view.post.embed.images
            # Quote posts carry their own media one level down, under .media.
            elif feed_view.post.embed and hasattr(feed_view.post.embed, "media") and postType == "quote":
                imageData = feed_view.post.embed.media.images
            # Sometimes posts have included links that are not included in the actual text of the post. This adds that back.
            if feed_view.post.embed and hasattr(feed_view.post.embed, "external") and hasattr(feed_view.post.embed.external, "uri"):
                if feed_view.post.embed.external.uri not in text:
                    text += '\n'+feed_view.post.embed.external.uri
            if imageData:
                for image in imageData:
                    images.append({"url": image.fullsize, "alt": image.alt})
            postInfo = {
                "text": text,
                "replyTo": replyTo,
                "images": images,
                "type": postType,
                "langs": langs
            }
            # Saving post to posts dictionary
            posts[cid] = postInfo;
    return posts
# Function for getting username of person replied to. It can mostly be retrieved from the reply section of the tweet that has been fetched,
# but in cases where the original post in a thread has been deleted it causes some weirdness. Hopefully this resolves it.
def getReplyToUser(reply):
    """Resolve the handle of the user a post replies to by fetching the thread.

    Returns an empty string when the thread can't be fetched, which callers
    treat as "skip this post".
    """
    uri = reply.uri
    username = ""
    try:
        response = bsky.app.bsky.feed.get_post_thread(params={"uri": uri})
        username = response.thread.post.author.handle
    # Narrowed from a bare except: a bare except also swallows SystemExit and
    # KeyboardInterrupt, making the poster hard to stop here.
    except Exception:
        writeLog("Unable to retrieve replyTo-user.")
    return username
# Function for getting included images. If no images are included, an empty list will be returned,
# and the posting functions will know not to include any images.
def getImages(images):
    """Download each image to a randomly named local file and return their metadata."""
    localImages = []
    for image in images:
        # Ten random lowercase letters make a throwaway local filename.
        name = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
        target = imagePath + name + ".jpg"
        # Fetch the fullsize rendition from Bluesky's CDN.
        urllib.request.urlretrieve(image["url"], target)
        # Alt text may be an empty string; downstream uploaders check for it.
        localImages.append({"filename": target, "alt": image["alt"]})
    return localImages
# Function for restoring shortened URLS
def restoreUrls(record):
    """Replace Bluesky's display-shortened link text with the full URLs."""
    text = record.text
    raw = record.text.encode("UTF-8")
    for facet in record.facets:
        feature = facet.features[0]
        if feature.py_type != "app.bsky.richtext.facet#link":
            continue
        # byte_start/byte_end are offsets into the UTF-8 encoded text, so the
        # displayed (shortened) string is sliced from the encoded form and
        # decoded before being swapped for the real URL.
        shortened = raw[facet.index.byte_start:facet.index.byte_end].decode("UTF-8")
        text = text.replace(shortened, feature.uri)
    return text
def getQuotePost(post):
    """Extract (author handle, cid) from a quoted post, whatever its shape.

    Quoted records arrive either as a plain dict, as an object carrying the
    author at top level, or nested one level down under .record.
    """
    if isinstance(post, dict):
        return post["record"]["author"]["handle"], post["record"]["cid"]
    if hasattr(post, "author"):
        return post.author.handle, post.cid
    return post.record.author.handle, post.record.cid
# Deprecated function
def imageFail(post):
    """Deprecated: detect posts whose embedded media failed to resolve."""
    # Short-circuit first: when there is no embed there is nothing to check.
    if not post.embed:
        return False
    lost_media = hasattr(post.record.embed, "image") or hasattr(post.record.embed, "media")
    return lost_media and not hasattr(post.embed, "images")
def post(posts):
    """Crosspost the given Bluesky posts to Twitter and Mastodon.

    posts maps cid -> post info as built by getPosts(). Returns True when the
    in-memory database was modified and therefore needs to be re-saved.
    """
    # The updates status is set to false until anything has been altered in the database. If nothing has been posted in a run, we skip resaving the database.
    updates = False
    # Running through the posts dictionary reversed, to get oldest posts first.
    for cid in reversed(list(posts.keys())):
        # Checking if the post is already in the database, and in that case getting the IDs for the post
        # on twitter and mastodon. If one or both of these IDs are empty, post will be sent.
        tweetId = ""
        tootId = ""
        tFail = 0
        mFail = 0
        if cid in database:
            tweetId = database[cid]["ids"]["twitterId"]
            tootId = database[cid]["ids"]["mastodonId"]
            tFail = database[cid]["failed"]["twitter"]
            mFail = database[cid]["failed"]["mastodon"]
        # When a service has failed too many times we stop retrying and record
        # the sentinel "FailedToPost" so replies in the thread are skipped too.
        if mFail >= settings.maxRetries:
            writeLog("Error limit reached, not posting to Mastodon")
            if not tootId:
                updates = True
                tootId = "FailedToPost"
        if tFail >= settings.maxRetries:
            writeLog("Error limit reached, not posting to Twitter")
            if not tweetId:
                updates = True
                tweetId = "FailedToPost"
        text = posts[cid]["text"]
        replyTo = posts[cid]["replyTo"]
        images = posts[cid]["images"]
        postType = posts[cid]["type"]
        langs = posts[cid]["langs"]
        tweetReply = ""
        tootReply = ""
        # If it is a reply, we get the IDs of the posts we want to reply to from the database.
        # If post is not found in database, we can't continue the thread on mastodon and twitter,
        # and so we skip it.
        if replyTo in database:
            tweetReply = database[replyTo]["ids"]["twitterId"]
            tootReply = database[replyTo]["ids"]["mastodonId"]
        elif replyTo and replyTo not in database:
            writeLog("Post was a reply to a post that is not in the database.")
            continue
        # If either tweet or toot has not previously been posted, we download images (given the post includes images).
        if images and (not tweetId or not tootId):
            images = getImages(images)
        # Trying to post to twitter and mastodon. If posting fails the post ID for each service is set to an
        # empty string, letting the code know it should try again next time the code is run.
        if not tweetId and tweetReply != "skipped" and tweetReply != "FailedToPost":
            updates = True
            try:
                tweetId = tweet(text, tweetReply, images, postType, langToggle(langs, "twitter"))
            except Exception as error:
                writeLog(error)
                tFail += 1
                tweetId = ""
        else:
            writeLog("Not posting to Twitter")
        # Mastodon does not have a quote retweet function, so those will just be sent as replies.
        if not tootId and tootReply != "skipped" and tootReply != "FailedToPost":
            updates = True
            try:
                tootId = toot(text, tootReply, images, langToggle(langs, "mastodon"))
            except Exception as error:
                writeLog(error)
                mFail += 1
                tootId = ""
        else:
            writeLog("Not posting to Mastodon")
        # Saving post to database
        jsonWrite(cid, tweetId, tootId, {"twitter": tFail, "mastodon": mFail})
    return updates
# This function uses the language selection as a way to select which posts should be crossposted.
def langToggle(langs, service):
    """Decide whether a post tagged with langs should go to the given service.

    With no language filter configured everything is posted; otherwise a
    matching language flips the postDefault setting for this post.
    """
    if service == "twitter":
        languageFilter = settings.twitterLang
    elif service == "mastodon":
        languageFilter = settings.mastodonLang
    else:
        # Unknown service name means a programming error somewhere upstream.
        writeLog("Something has gone very wrong")
        exit()
    if not languageFilter:
        return True
    # A match inverts the default posting behaviour; no match keeps it.
    return (not settings.postDefault) if (langs and languageFilter in langs) else settings.postDefault
# Function for posting tweets
def tweet(post, replyTo, images, postType, doPost):
    """Post to Twitter and return the new tweet id, or "skipped".

    post is the text; replyTo a tweet id to reply to or quote; images a list
    of local image descriptors; postType "post"/"reply"/"quote"; doPost the
    language-filter verdict from langToggle.
    """
    if not settings.Twitter or not doPost:
        return "skipped";
    mediaIds = []
    # If post includes images, images are uploaded so that they can be included in the tweet
    if images:
        mediaIds = []
        for image in images:
            filename = image["filename"]
            alt = image["alt"]
            # Truncate over-long alt text (the cut at 996 leaves room for "...").
            if len(alt) > 1000:
                alt = alt[:996] + "..."
            res = twitter_images.media_upload(filename)
            id = res.media_id
            # If alt text was added to the image on bluesky, it's also added to the image on twitter.
            if alt:
                writeLog("Uploading image " + filename + " with alt: " + alt + " to twitter")
                twitter_images.create_media_metadata(id, alt)
            mediaIds.append(id)
    # Checking if the post is longer than 280 characters, and if so sending to the
    # splitPost-function.
    partTwo = ""
    if postLength(post) > 280:
        post, partTwo = splitPost(post)
        # If the function does not return a post, splitting failed and we will skip this post.
        if not post:
            return "skipped"
    # I wanted to make this part a little neater, but didn't get it to work and gave up. So here we are.
    # If post is both reply and has images it is posted as both a reply and with images (duh), if it's
    # a quote with images it's posted as that. If just either of the three it is posted as just that,
    # and if neither it is just posted as a text post.
    if replyTo and mediaIds and postType == "quote":
        a = twitter.create_tweet(text=post, quote_tweet_id=replyTo, media_ids=mediaIds)
    elif replyTo and mediaIds and postType == "reply":
        a = twitter.create_tweet(text=post, in_reply_to_tweet_id=replyTo, media_ids=mediaIds)
    elif postType == "quote":
        a = twitter.create_tweet(text=post, quote_tweet_id=replyTo)
    elif replyTo:
        a = twitter.create_tweet(text=post, in_reply_to_tweet_id=replyTo)
    elif mediaIds:
        a = twitter.create_tweet(text=post, media_ids=mediaIds)
    else:
        a = twitter.create_tweet(text=post)
    writeLog("Posted to twitter")
    id = a[0]["id"]
    # A split post's second half is threaded as a reply to the first half.
    if partTwo:
        a = twitter.create_tweet(text=partTwo, in_reply_to_tweet_id=id)
        id = a[0]["id"]
    return id
# More or less the exact same function as for tweeting, but for tooting.
def toot(post, replyTo, images, doPost):
    """Post to Mastodon and return the new status id, or "skipped"."""
    if not settings.Mastodon or not doPost:
        return "skipped";
    mediaIds = []
    # If post includes images, images are uploaded so that they can be included in the toot
    if images:
        for image in images:
            filename = image["filename"]
            alt = image["alt"]
            # If alt text was added to the image on bluesky, it's also added to the image on mastodon,
            # otherwise it will be uploaded without alt text.
            if alt:
                writeLog("Uploading image " + filename + " with alt: " + alt + " to mastodon")
                res = mastodon.media_post(filename, description=alt)
            else:
                writeLog("Uploading image " + filename)
                res = mastodon.media_post(filename)
            mediaIds.append(res.id)
    # Visibility is set to whatever is set in the settings file. If that is hybrid, it sets the visibility either to public or unlisted depending on
    # if it is a reply in a thread or not.
    visibility = settings.mastodonVisibility
    if visibility == "hybrid" and replyTo:
        visibility = "unlisted"
    elif visibility == "hybrid":
        visibility = "public"
    # I wanted to make this part a little neater, but didn't get it to work and gave up. So here we are.
    # If post is both reply and has images it is posted as both a reply and with images (duh).
    # If just either of the two it is posted with just that, and if neither it is just posted as a text post.
    if replyTo and mediaIds:
        a = mastodon.status_post(post, in_reply_to_id=replyTo, media_ids=mediaIds, visibility=visibility)
    elif replyTo:
        a = mastodon.status_post(post, in_reply_to_id=replyTo, visibility=visibility)
    elif mediaIds:
        a = mastodon.status_post(post, media_ids=mediaIds, visibility=visibility)
    else:
        a = mastodon.status_post(post, visibility=visibility)
    writeLog("Posted to mastodon")
    id = a["id"]
    return id
# Function for correctly counting post length
def postLength(post):
    """Count post length the way Twitter does: every long URL counts as 23 chars."""
    # Twitter shortens urls to 23 characters
    shortUrlLength = 23
    regex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
    # For every URL longer than the t.co length, the shortening saves the
    # difference; subtract the total savings from the raw length.
    savings = sum(
        len(match[0]) - shortUrlLength
        for match in re.findall(regex, post)
        if len(match[0]) > shortUrlLength
    )
    return len(post) - savings
# Function for splitting up posts that are too long for twitter.
def splitPost(text):
    """Split a >280-character post into two tweetable parts.

    Tries sentence boundaries first, then words. Returns ("", "") when no
    split yields two parts of at most 280 characters each, which the caller
    treats as "don't post to twitter".
    """
    writeLog("Splitting post that is too long for twitter.")
    first = text
    # Bug fix: 'second' used to be unbound (NameError at the final length
    # check) whenever neither loop ran, e.g. an over-long post containing
    # no ". " and no spaces.
    second = ""
    # We first try to split the post into sentences, and send as many as can fit in the first one,
    # and the rest in the second.
    sentences = text.split(". ")
    i = 1
    while len(first) > 280 and i < len(sentences):
        first = ".".join(sentences[:(len(sentences) - i)]) + "."
        second = ".".join(sentences[(len(sentences) - i):])
        i += 1
    # If splitting by sentence does not result in a short enough post, we try splitting by words instead.
    if len(first) > 280:
        first = text
        words = text.split(" ")
        i = 1
        while len(first) > 280 and i < len(words):
            first = " ".join(words[:(len(words) - i)])
            second = " ".join(words[(len(words) - i):])
            i += 1
    # If splitting has ended up with either a first or second part that is too long, we return empty
    # strings and the post is not sent to twitter.
    if len(first) > 280 or len(second) > 280:
        writeLog("Was not able to split post.")
        first = ""
        second = ""
    return first, second
# Function for writing new lines to the database
def jsonWrite(skeet, tweet, toot, failed):
    """Record one post's service ids in the db file and the in-memory database.

    skeet is the bluesky cid; tweet/toot are the service-side ids (possibly
    "" or "FailedToPost"); failed holds the per-service failure counters.
    """
    ids = {
        "twitterId": tweet,
        "mastodonId": toot
    }
    data = {
        "ids": ids,
        "failed": failed
    }
    # When running, the code saves the database to memory, so instead of just saving the post to the database file,
    # we also save it to the open database. This also overwrites the version of the post in memory in case
    # an ID that was missing because of a previous failure.
    database[skeet] = data
    row = {
        "skeet": skeet,
        "ids": ids,
        "failed": failed
    }
    jsonString = json.dumps(row)
    # If the database file exists we want to append to it, otherwise we create it anew.
    append_write = 'a' if os.path.exists(databasePath) else 'w'
    # Skipping adding posts to db file if they are already in it.
    if not isInDB(jsonString):
        writeLog("Adding to database: " + jsonString)
        # with-statement guarantees the handle is closed even if the write fails
        # (the old open()/close() pair leaked the handle on error).
        with open(databasePath, append_write) as file:
            file.write(jsonString + "\n")
# Function for reading database file and saving values in a dictionary
def jsonRead():
    """Read the database file into a dict keyed by bluesky cid.

    Malformed lines are skipped. Entries written before failure tracking
    existed get zeroed failure counters.
    """
    database = {}
    if not os.path.exists(databasePath):
        return database
    with open(databasePath, 'r') as file:
        for line in file:
            try:
                jsonLine = json.loads(line)
            # Narrowed from a bare except: only malformed JSON should be
            # skipped, not e.g. KeyboardInterrupt.
            except json.JSONDecodeError:
                continue
            skeet = jsonLine["skeet"]
            ids = jsonLine["ids"]
            failed = jsonLine.get("failed", {"twitter": 0, "mastodon": 0})
            database[skeet] = {
                "ids": ids,
                "failed": failed
            }
    return database
# Function for checking if a line is already in the database-file
def isInDB(line):
    """Return True when the exact json line already appears in the database file."""
    if not os.path.exists(databasePath):
        return False
    # Substring check over the whole file contents, as before.
    with open(databasePath, 'r') as dbfile:
        return line in dbfile.read()
# Function for writing to the log file
def writeLog(message):
    """Print a timestamped message and, when logging is enabled, append it to today's log file."""
    now = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    date = datetime.now().strftime("%y%m%d")
    message = str(now) + ": " + str(message) + "\n"
    print(message)
    if not settings.Logging:
        return
    # One log file per day, named by date.
    log = logPath + date + ".log"
    append_write = 'a' if os.path.exists(log) else 'w'
    # with-statement guarantees the handle is closed even if the write fails
    # (the old open()/close() pair leaked the handle on error).
    with open(log, append_write) as dst:
        dst.write(message)
# Cleaning up downloaded images
def cleanup():
    """Delete everything inside the local image download directory."""
    writeLog("Deleting local images")
    for entry in os.listdir(imagePath):
        target = os.path.join(imagePath, entry)
        try:
            # Files and symlinks are unlinked; stray directories are removed
            # recursively.
            if os.path.isfile(target) or os.path.islink(target):
                os.unlink(target)
            elif os.path.isdir(target):
                shutil.rmtree(target)
        except Exception as e:
            writeLog('Failed to delete %s. Reason: %s' % (target, e))
# Since we are working with a version of the database in memory, at the end of the run
# we completely overwrite the database on file with the one in memory.
# This does kind of make it unnecessary to write each new post to the file while running,
# but in case the program fails halfway through it gives us kind of a backup.
def saveDB():
    """Overwrite the database file with the in-memory database."""
    writeLog("Saving new database")
    # Open the file once in write mode instead of reopening it for every
    # single row (the old code opened with "w" for the first row and "a"
    # for each following one, leaking a handle per row on error).
    with open(databasePath, "w") as file:
        for skeet in database:
            row = {
                "skeet": skeet,
                "ids": database[skeet]["ids"],
                "failed": database[skeet]["failed"]
            }
            file.write(json.dumps(row) + "\n")
# Function for counting lines in a file
def countLines(file):
    """Return the number of lines in the given file.

    The previous enumerate-based version returned the index of the last line
    (one less than the true count) and raised NameError on an empty file.
    Both call sites in dbBackup compare two values produced by this same
    function, so the consistent fix does not change the backup comparison.
    """
    with open(file, 'r') as handle:
        return sum(1 for _ in handle)
# Every twelve hours a backup of the database is saved, in case something happens to the live database.
# If the live database contains fewer lines than the backup it means something has probably gone wrong,
# and before the live database is saved as a backup, the current backup is saved as a new file, so that
# it can be recovered later.
def dbBackup():
    """Take a periodic backup of the database file, preserving suspicious backups."""
    # Nothing to back up, or the existing backup is still fresh.
    # NOTE(review): the comment above says twelve hours but the code checks a
    # 24-hour window — confirm which is intended.
    if not os.path.isfile(databasePath) or (os.path.isfile(backupPath)
        and datetime.fromtimestamp(os.stat(backupPath).st_mtime) > datetime.now() - timedelta(hours = 24)):
        return
    if os.path.isfile(backupPath):
        if countLines(backupPath) < countLines(databasePath):
            os.remove(backupPath)
        else:
            # The backup holds more entries than the live database: likely data
            # loss, so keep the old backup under a dated name for recovery.
            date = datetime.now().strftime("%y%m%d")
            os.rename(backupPath, backupPath + "_" + date)
            writeLog("Current backup file contains more entries than current live database, backup saved")
    shutil.copyfile(databasePath, backupPath)
    writeLog("Backup of database taken")
# Here the whole thing is run
# NOTE(review): this module-level run block is the pre-refactor entry point;
# the __main__ guard below is the refactored one and expects the new helpers
# (db_read, post_cache_read, ...). Both appear in this diff view — confirm
# the old block is the one being removed.
database = jsonRead()
posts = getPosts()
updates = post(posts)
if updates:
    saveDB()
cleanup()
dbBackup()
if not posts:
    writeLog("No new posts found.")
if __name__ == "__main__":
    # Refactored entry point: read db + post cache, compute the time window,
    # fetch and crosspost, then persist cache/db and tidy up.
    database = db_read()
    post_cache = post_cache_read()
    timelimit = get_post_time_limit(post_cache)
    posts = get_posts(timelimit)
    updates, database, post_cache = post(posts, database, post_cache)
    post_cache_write(post_cache)
    if updates:
        save_db(database)
    cleanup()
    db_backup()
    if not posts:
        write_log("No new posts found.")

View File

@@ -5,6 +5,7 @@ services:
environment:
BSKY_HANDLE:
BSKY_PASSWORD:
MASTODON_HANDLE:
MASTODON_INSTANCE:
MASTODON_TOKEN:
TWITTER_APP_KEY:
@@ -13,13 +14,16 @@ services:
TWITTER_ACCESS_TOKEN_SECRET:
TWITTER_CROSSPOSTING:
MASTODON_CROSSPOSTING:
LOG_LEVEL:
MASTODON_VISIBILITY:
LOGGING:
MENTIONS:
POST_DEFAULT:
MASTODON_LANG:
TWITTER_LANG:
QUOTE_POSTS:
MAX_RETRIES:
RUN_INTERVAL:
POST_TIME_LIMIT:
MAX_PER_HOUR:
OVERFLOW_POST:
volumes:
- ./dbhost:/db

View File

@@ -1,18 +1,22 @@
BSKY_HANDLE=
BSKY_PASSWORD=
MASTODON_INSTANCE=
MASTODON_TOKEN=
TWITTER_APP_KEY=
TWITTER_APP_SECRET=
TWITTER_ACCESS_TOKEN=
TWITTER_ACCESS_TOKEN_SECRET=
TWITTER_CROSSPOSTING=
MASTODON_CROSSPOSTING=
MASTODON_VISIBILITY=
LOGGING=
POST_DEFAULT=
MASTODON_LANG=
TWITTER_LANG=
MAX_RETRIES=
RUN_INTERVAL=
POST_TIME_LIMIT=
BSKY_HANDLE=
BSKY_PASSWORD=
MASTODON_HANDLE=
MASTODON_INSTANCE=
MASTODON_TOKEN=
TWITTER_APP_KEY=
TWITTER_APP_SECRET=
TWITTER_ACCESS_TOKEN=
TWITTER_ACCESS_TOKEN_SECRET=
TWITTER_CROSSPOSTING=
MASTODON_CROSSPOSTING=
LOG_LEVEL=
MASTODON_VISIBILITY=
MENTIONS=
POST_DEFAULT=
MASTODON_LANG=
TWITTER_LANG=
QUOTE_POSTS=
MAX_RETRIES=
POST_TIME_LIMIT=
MAX_PER_HOUR=
OVERFLOW_POST=

235
input/bluesky.py Normal file
View File

@@ -0,0 +1,235 @@
from atproto import Client
from settings.auth import BSKY_HANDLE, BSKY_PASSWORD
from settings.paths import *
from settings import settings
from local.functions import write_log, lang_toggle, get_post_time_limit
import urllib.request, random, string, arrow
date_in_format = 'YYYY-MM-DDTHH:mm:ss'
# Setting up connections to bluesky, twitter and mastodon
bsky = Client()
bsky.login(BSKY_HANDLE, BSKY_PASSWORD)
# Getting posts from bluesky
def get_posts(timelimit = None):
    """Fetch the user's recent Bluesky feed and collect crosspostable posts.

    timelimit is an arrow timestamp; posts created (or reposted) before it
    are ignored. Defaults to one hour ago, evaluated per call. Returns a
    dict keyed by post cid with text, reply/quote targets, images,
    visibility, per-service flags and timestamps.
    """
    # Bug fix: the default used to be written as
    # def get_posts(timelimit = arrow.utcnow().shift(hours = -1)), which is
    # evaluated once at import time, so a long-lived process would compare
    # against an ever-older cutoff. Evaluate the default per call instead.
    if timelimit is None:
        timelimit = arrow.utcnow().shift(hours = -1)
    write_log("Gathering posts")
    posts = {}
    # Getting feed of user
    profile_feed = bsky.app.bsky.feed.get_author_feed({'actor': BSKY_HANDLE})
    for feed_view in profile_feed.feed:
        # If the post was not written by the account that posted it, it is a repost and we skip it.
        if feed_view.post.author.handle != BSKY_HANDLE:
            continue
        repost = False
        created_at = arrow.get(feed_view.post.record.created_at.split(".")[0], date_in_format)
        # A feed reason with an indexed_at timestamp marks our own repost; use
        # the repost time instead of the original creation time.
        if hasattr(feed_view.reason, "indexed_at"):
            repost = True
            created_at = arrow.get(feed_view.reason.indexed_at.split(".")[0], date_in_format)
        # The language settings on posts are used to determine if a post should be crossposted
        # to a specific service. Here we check the settings against the language of the post to
        # see what service it should post to. We also check if posting for a service is enabled
        # at all in the settings. If it shouldn't post to either, we skip it.
        langs = feed_view.post.record.langs
        mastodon_post = (lang_toggle(langs, "mastodon") and settings.Mastodon)
        twitter_post = (lang_toggle(langs, "twitter") and settings.Twitter)
        if not mastodon_post and not twitter_post:
            continue
        cid = feed_view.post.cid
        text = feed_view.post.record.text
        # Facets contains things like urls and mentions, which we need to deal with.
        # send_mention is used to keep track of if the mention-settings says for the post to be posted or not.
        # Default is True, because if nobody is mentioned it should be posted.
        send_mention = True
        if feed_view.post.record.facets:
            # Sometimes bluesky shortens URLs and in that case we need to restore them before crossposting
            text = restore_urls(feed_view.post.record)
            # If a user is mentioned the parse_mentioned_username function will deal with it according
            # to how the variable "mentions" is set in settings. If it is set to "ignore", nothing is done.
            if settings.mentions != "ignore":
                text, send_mention = parse_mentioned_username(feed_view.post.record, text)
        # If "mentions" is set to "skip" a post with a mention should not be crossposted.
        if not send_mention:
            continue
        # Setting reply_to_user to the same as user handle and only changing it if the tweet is an actual reply.
        # This way we can just check if the variable is the same as the user handle later and send through
        # both tweets that are not replies, and posts that are part of a thread.
        reply_to_user = BSKY_HANDLE
        reply_to_post = ""
        quoted_post = ""
        quote_url = ""
        # Checking who is allowed to reply to the post
        allowed_reply = get_allowed_reply(feed_view.post)
        # Checking if post is a quote post. Posts with references to feeds look like quote posts but
        # aren't, and so will fail on missing attribute; those are skipped.
        if feed_view.post.embed and hasattr(feed_view.post.embed, "record"):
            try:
                quoted_user, quoted_post, quote_url, is_public = get_quote_post(feed_view.post.embed.record)
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
            except Exception:
                write_log("Post " + cid + " is of a type the crossposter can't parse.", "error")
                continue
            # If post is a quote post of a post from another user, and quote-posting is disabled in settings
            # or the post is not open to users not logged in, the post will be skipped
            if quoted_user != BSKY_HANDLE and (not settings.quote_posts or not is_public):
                continue
            # If the post is a quote of ourselves, the url to the post is removed (if it was included),
            # as we instead want to reference the version of the post from twitter or mastodon.
            elif quoted_user == BSKY_HANDLE:
                text = text.replace(quote_url, "")
        # Checking if post is regular reply
        if feed_view.post.record.reply:
            reply_to_post = feed_view.post.record.reply.parent.cid
            # Poster will try to fetch reply to-username the "ordinary" way,
            # and if it fails, it will try getting the entire thread and
            # finding it that way
            try:
                reply_to_user = feed_view.reply.parent.author.handle
            except Exception:
                reply_to_user = get_reply_to_user(feed_view.post.record.reply.parent)
        # If unable to fetch user that was replied to, code will skip this post. If the post was not a
        # reply at all, the reply_to_user will still be set to the user account.
        if not reply_to_user:
            write_log("Unable to find the user that post " + cid + " replies to or quotes", "error")
            continue
        # Checking if post is within timelimit and not a reply to someone elses post.
        if created_at > timelimit and reply_to_user == BSKY_HANDLE:
            # Fetching images if there are any in the post
            image_data = ""
            images = []
            if feed_view.post.embed and hasattr(feed_view.post.embed, "images"):
                image_data = feed_view.post.embed.images
            elif feed_view.post.embed and hasattr(feed_view.post.embed, "media") and hasattr(feed_view.post.embed, "record"):
                image_data = feed_view.post.embed.media.images
            # Sometimes posts have included links that are not included in the actual text of the post. This adds that back.
            if feed_view.post.embed and hasattr(feed_view.post.embed, "external") and hasattr(feed_view.post.embed.external, "uri"):
                if feed_view.post.embed.external.uri not in text:
                    text += '\n'+feed_view.post.embed.external.uri
            if image_data:
                for image in image_data:
                    images.append({"url": image.fullsize, "alt": image.alt})
            # Bug fix: visibility was previously read from settings once before
            # the loop and then overwritten inside it, so in "hybrid" mode the
            # first threaded post flipped the value to "unlisted" for every
            # later post in the same run. Resolve it per post instead.
            visibility = settings.visibility
            if visibility == "hybrid" and reply_to_post:
                visibility = "unlisted"
            elif visibility == "hybrid":
                visibility = "public"
            post_info = {
                "text": text,
                "reply_to_post": reply_to_post,
                "quoted_post": quoted_post,
                "quote_url": quote_url,
                "images": images,
                "visibility": visibility,
                "twitter": twitter_post,
                "mastodon": mastodon_post,
                "allowed_reply": allowed_reply,
                "repost": repost,
                "timestamp": created_at
            }
            # Saving post to posts dictionary
            posts[cid] = post_info
    return posts
# Function for getting username of person replied to. It can mostly be retrieved from the reply section of the tweet that has been fetched,
# but in cases where the original post in a thread has been deleted it causes some weirdness. Hopefully this resolves it.
def get_reply_to_user(reply):
    """Resolve the handle a post replies to by fetching the full thread.

    Returns an empty string on failure, which callers treat as "skip".
    """
    uri = reply.uri
    username = ""
    try:
        response = bsky.app.bsky.feed.get_post_thread(params={"uri": uri})
        username = response.thread.post.author.handle
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    except Exception:
        write_log("Unable to retrieve reply_to-user of post.", "error")
    return username
def get_allowed_reply(post):
    """Translate a post's threadgate into a reply-permission label.

    Returns one of "All", "None", "Following", "Mentioned" or "Unknown".
    """
    gate = post.threadgate
    # No threadgate at all means replies are unrestricted.
    if gate is None:
        return "All"
    # An empty allow-list means nobody may reply.
    if len(gate.record.allow) == 0:
        return "None"
    rules = {
        "app.bsky.feed.threadgate#followingRule": "Following",
        "app.bsky.feed.threadgate#mentionRule": "Mentioned",
    }
    return rules.get(gate.record.allow[0].py_type, "Unknown")
# Function for restoring shortened URLS
def restore_urls(record):
    """Replace Bluesky's display-shortened link text with the full URLs."""
    text = record.text
    raw = record.text.encode("UTF-8")
    for facet in record.facets:
        feature = facet.features[0]
        if feature.py_type != "app.bsky.richtext.facet#link":
            continue
        # byte_start/byte_end are offsets into the UTF-8 encoded text, so the
        # displayed (shortened) string is sliced from the encoded form and
        # decoded before being swapped for the real URL.
        shortened = raw[facet.index.byte_start:facet.index.byte_end].decode("UTF-8")
        text = text.replace(shortened, feature.uri)
    return text
def parse_mentioned_username(record, text):
    """Handle @-mentions in a post according to settings.mentions.

    Returns (possibly rewritten text, send_mention) where send_mention is
    False when the post should not be crossposted at all ("skip" mode).
    """
    send_mention = True
    encoded_text = text.encode("UTF-8")
    for facet in record.facets:
        feature = facet.features[0]
        if feature.py_type != "app.bsky.richtext.facet#mention":
            continue
        # byte_start/byte_end are byte offsets into the UTF-8 text, so the
        # mentioned handle is sliced from the encoded form and decoded.
        username = encoded_text[facet.index.byte_start:facet.index.byte_end].decode("UTF-8")
        if settings.mentions == "skip":
            # Posts mentioning someone are not crossposted at all.
            send_mention = False
        elif settings.mentions == "strip":
            # Keep the name but drop the @ so nobody gets pinged elsewhere.
            text = text.replace(username, username.replace("@", ""))
        elif settings.mentions == "url":
            # Replace the handle with a link to the bluesky profile.
            text = text.replace(username, "https://bsky.app/profile/" + feature.did)
    return text, send_mention
# Quoted posts can be stored in several different ways for some reason. With this
# function we check which one is used and fetches information accordingly.
def get_quote_post(post):
    """Extract (handle, cid, bsky url, publicly-visible) from a quoted post."""
    if isinstance(post, dict):
        user = post["record"]["author"]["handle"]
        cid = post["record"]["cid"]
        uri = post["record"]["uri"]
        labels = post["record"]["author"]["labels"]
    elif hasattr(post, "author"):
        user, cid = post.author.handle, post.cid
        uri, labels = post.uri, post.author.labels
    else:
        record = post.record
        user, cid = record.author.handle, record.cid
        uri, labels = record.uri, record.author.labels
    # the val label is used by bluesky to check if a post should be viewable by people
    # who are not logged in. When crossposting with a link to a bsky post, we first
    # want to make sure that the post in question is publicly available.
    # (Local renamed from "open", which shadowed the builtin.)
    is_public = not (labels and labels[0].val == "!no-unauthenticated")
    url = "https://bsky.app/profile/" + user + "/post/" + uri.split("/")[-1]
    return user, cid, url, is_public

131
local/db.py Normal file
View File

@@ -0,0 +1,131 @@
from settings.paths import *
from local.functions import write_log
import json, os, shutil, arrow
# Function for writing new lines to the database
def db_write(skeet, tweet, toot, failed, database):
    """Record a crossposted skeet in the in-memory database and the db file.

    skeet    -- bluesky post id (cid), used as the database key
    tweet    -- twitter status id (or sentinel string) for the post
    toot     -- mastodon status id (or sentinel string) for the post
    failed   -- per-service failure counts, e.g. {"twitter": 0, "mastodon": 0}
    database -- in-memory database dict; returned with this entry added/updated
    """
    ids = {
        "twitter_id": tweet,
        "mastodon_id": toot
    }
    # When running, the code saves the database to memory, so instead of just saving
    # the post to the database file, we also save it to the open database. This also
    # overwrites the version of the post in memory in case an ID was missing
    # because of a previous failure.
    database[skeet] = {
        "ids": ids,
        "failed": failed
    }
    json_string = json.dumps({
        "skeet": skeet,
        "ids": ids,
        "failed": failed
    })
    # Append mode also creates the file when it does not exist yet, so no
    # separate write/append decision is needed. Rows already present in the
    # file are not appended a second time.
    if not is_in_db(json_string):
        write_log("Adding to database: " + json_string)
        with open(database_path, 'a') as file:
            file.write(json_string + "\n")
    return database
# Function for reading database file and saving values in a dictionary
def db_read():
    """Load the database file into a dict keyed on skeet id.

    Each value is {"ids": {...}, "failed": {...}}. Lines that are not valid
    JSON are skipped. Returns an empty dict when the file does not exist.
    """
    database = {}
    if not os.path.exists(database_path):
        return database
    with open(database_path, 'r') as file:
        for line in file:
            # Only guard the JSON parse (json.loads raises ValueError);
            # a line that parses but lacks the expected keys is a
            # programming error and should surface, not be swallowed.
            try:
                json_line = json.loads(line)
            except ValueError:
                continue
            # Entries written before fail tracking existed get zeroed counts.
            failed = json_line.get("failed", {"twitter": 0, "mastodon": 0})
            database[json_line["skeet"]] = {
                "ids": db_convert(json_line["ids"]),
                "failed": failed
            }
    return database
# After changing from camelCase to snake_case, old database entries will have to be converted.
def db_convert(ids_in):
    """Normalize a stored ids dict to snake_case keys.

    Accepts either the current {"twitter_id", "mastodon_id"} keys or the
    legacy camelCase {"twitterId", "mastodonId"} keys. Raises KeyError when
    a service id is missing under both spellings.
    """
    ids_out = {}
    # Catch only the missing-key case instead of a bare except, so unrelated
    # errors are not silently redirected to the legacy lookup.
    try:
        ids_out["twitter_id"] = ids_in["twitter_id"]
    except KeyError:
        ids_out["twitter_id"] = ids_in["twitterId"]
    try:
        ids_out["mastodon_id"] = ids_in["mastodon_id"]
    except KeyError:
        ids_out["mastodon_id"] = ids_in["mastodonId"]
    return ids_out
# Function for checking if a line is already in the database-file
def is_in_db(line):
    """Return True if the serialized row already appears in the database file."""
    if not os.path.exists(database_path):
        return False
    # A serialized row never contains a newline, so a plain substring check
    # over the whole file contents is sufficient.
    with open(database_path, 'r') as file:
        return line in file.read()
# Since we are working with a version of the database in memory, at the end of the run
# we completely overwrite the database on file with the one in memory.
# This does kind of make it uneccessary to write each new post to the file while running,
# but in case the program fails halfway through it gives us kind of a backup.
def save_db(database):
    """Overwrite the database file with the in-memory database, one JSON row per line."""
    write_log("Saving new database")
    # Open the file once in write mode instead of reopening it for every row
    # (the original toggled "w" then "a" per iteration).
    with open(database_path, "w") as file:
        for skeet, data in database.items():
            row = {
                "skeet": skeet,
                "ids": data["ids"],
                "failed": data["failed"]
            }
            file.write(json.dumps(row) + "\n")
# Every twelve hours a backup of the database is saved, in case something happens to the live database.
# If the live database contains fewer lines than the backup it means something has probably gone wrong,
# and before the live database is saved as a backup, the current backup is saved as a new file, so that
# it can be recovered later.
# NOTE(review): the comment above says twelve hours, but the check below only
# takes a new backup when the existing one is older than 24 hours — confirm
# which interval is intended.
def db_backup():
    """Copy the live database file to the backup path, at most once per 24h.

    Does nothing when there is no live database, or when the current backup
    is newer than 24 hours. A backup holding more rows than the live
    database is preserved under a dated filename instead of being removed.
    """
    # Skip when there is no database to back up, or the backup is recent
    # enough (mtime within the last 24 hours).
    if not os.path.isfile(database_path) or (os.path.isfile(backup_path)
        and arrow.Arrow.fromtimestamp(os.stat(backup_path).st_mtime) > arrow.utcnow().shift(hours = -24)):
        return
    if os.path.isfile(backup_path):
        if count_lines(backup_path) < count_lines(database_path):
            os.remove(backup_path)
        else:
            # The backup has at least as many rows as the live db — suspicious,
            # so keep it under a dated name for manual recovery.
            date = arrow.utcnow().format("YYMMDD")
            os.rename(backup_path, backup_path + "_" + date)
            write_log("Current backup file contains more entries than current live database, backup saved", "error")
    shutil.copyfile(database_path, backup_path)
    write_log("Backup of database taken")
# Function for counting lines in a file
def count_lines(file):
    """Return the number of lines in *file*.

    Fixes an off-by-one in the previous enumerate-based version, which
    returned the index of the last line (one less than the count) for any
    non-empty file.
    """
    with open(file, 'r') as handle:
        return sum(1 for _ in handle)

114
local/functions.py Normal file
View File

@@ -0,0 +1,114 @@
from settings.auth import *
from settings.paths import *
from local.functions import *
import settings.settings as settings
import os, shutil, re, arrow
# This function uses the language selection as a way to select which posts should be crossposted.
def lang_toggle(langs, service):
    """Decide whether a post with languages *langs* should go to *service*.

    langs   -- language codes on the bluesky post (may be empty or None)
    service -- "twitter" or "mastodon"; anything else aborts the run

    With no language filter configured for the service every post passes.
    Otherwise a match on the configured language inverts settings.post_default.
    """
    # The local is named lang_filter so it no longer shadows this function.
    if service == "twitter":
        lang_filter = settings.twitter_lang
    elif service == "mastodon":
        lang_filter = settings.mastodon_lang
    else:
        write_log("Something has gone very wrong.", "error")
        exit()
    if not lang_filter:
        return True
    if langs and lang_filter in langs:
        return (not settings.post_default)
    else:
        return settings.post_default
# Function for correctly counting post length
def post_length(post):
    """Return the effective character count of *post* on twitter.

    Twitter replaces every url with a t.co link of a fixed length, so any
    url longer than that only counts as 23 characters.
    """
    short_url_length = 23
    regex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
    # Sum the characters saved by url shortening and subtract them from the
    # raw length (group 0 of each match is the full url).
    saved = sum(
        len(match[0]) - short_url_length
        for match in re.findall(regex, post)
        if len(match[0]) > short_url_length
    )
    return len(post) - saved
# Function for writing to the log file
def write_log(message, type = "message"):
    """Print *message* and append it to today's log file.

    type -- "message" or "error"; plain messages are suppressed when the
    configured log level is "none" or "error".
    """
    # Respect the configured log level before doing any work.
    if settings.log_level == "none" or (settings.log_level == "error" and type == "message"):
        return
    now = arrow.utcnow().format("DD/MM/YYYY HH:mm:ss")
    date = arrow.utcnow().format("YYMMDD")
    message = str(now) + " (" + type.upper() + "): " + str(message) + "\n"
    print(message)
    # One log file per day; append mode also creates the file when missing,
    # so no separate write-mode branch is needed.
    log = log_path + date + ".log"
    with open(log, 'a') as dst:
        dst.write(message)
# Cleaning up downloaded images
def cleanup():
    """Delete every file, symlink and subdirectory in the image directory."""
    write_log("Deleting local images")
    for entry in os.listdir(image_path):
        target = os.path.join(image_path, entry)
        try:
            # Files and symlinks are unlinked; real directories are removed
            # recursively. Failures are logged but do not stop the sweep.
            if os.path.isfile(target) or os.path.islink(target):
                os.unlink(target)
            elif os.path.isdir(target):
                shutil.rmtree(target)
        except Exception as e:
            write_log('Failed to delete %s. Reason: %s' % (target, e), "error")
# Following two functions deals with the post per hour limit
# Function for reading post log and checking number of posts sent in last hour
def post_cache_read():
    """Read the post cache file and return {post_id: timestamp} for the last hour.

    Each cache line has the form "<post_id>;<unix_timestamp>". Entries older
    than one hour are dropped; unparseable lines are logged and skipped.
    """
    write_log("Reading cache of recent posts.")
    cache = {}
    timelimit = arrow.utcnow().shift(hours = -1)
    if not os.path.exists(post_cache_path):
        write_log(post_cache_path + " not found.")
        return cache
    with open(post_cache_path, 'r') as file:
        for line in file:
            try:
                post_id, raw_timestamp = line.strip().split(";")
                # BUG FIX: the timestamp is stored as "<seconds>.<fraction>",
                # and splitting the whole line on "." previously parsed only
                # the fractional digits. Parse the full float after ";".
                timestamp = arrow.Arrow.fromtimestamp(float(raw_timestamp))
            except Exception as error:
                write_log(error, "error")
                continue
            if timestamp > timelimit:
                cache[post_id] = timestamp
    return cache
def post_cache_write(cache):
    """Overwrite the post cache file with one "<post_id>;<unix_timestamp>" line per entry."""
    write_log("Saving post cache.")
    # Open once in write mode instead of reopening the file for every entry
    # (the original toggled "w" then "a" per iteration).
    with open(post_cache_path, "w") as file:
        for post_id, sent_at in cache.items():
            file.write(post_id + ";" + str(sent_at.timestamp()) + "\n")
# The timelimit specifies the cutoff time for which posts are crossposted. This is usually based on the
# post_time_limit in settings, but if overflow_posts is set to "skip", meaning any posts that could
# not be posted due to the hourly post max limit is to be skipped, then the timelimit is instead set to
# when the last post was sent.
def get_post_time_limit(cache):
    """Return the cutoff time before which posts are not crossposted."""
    timelimit = arrow.utcnow().shift(hours = -settings.post_time_limit)
    if settings.overflow_posts != "skip":
        return timelimit
    # When overflowing posts are skipped, only posts newer than the most
    # recently sent post are considered.
    return max([timelimit, *cache.values()])

46
output/mastodon.py Normal file
View File

@@ -0,0 +1,46 @@
from mastodon import Mastodon
from settings import settings
from settings.auth import *
from local.functions import write_log
# Only create the mastodon API client when mastodon crossposting is enabled
# in settings; the toot/retoot functions below assume this global exists.
if settings.Mastodon:
    mastodon = Mastodon(
        access_token = MASTODON_TOKEN,
        api_base_url = MASTODON_INSTANCE
    )
# More or less the exact same function as for tweeting, but for tooting.
def toot(post, reply_to_post, quoted_post, images, visibility = "unlisted"):
    """Post *post* to mastodon and return the new status id.

    post          -- text of the status
    reply_to_post -- mastodon id of the status this replies to, or None
    quoted_post   -- mastodon id of the quoted status, or None/""
    images        -- list of {"filename": ..., "alt": ...} dicts to attach
    visibility    -- mastodon visibility level for the status
    """
    # Since mastodon does not have a quote repost function, quote posts are turned into replies. If the post is both
    # a reply and a quote post, the quote is replaced with a url to the post quoted.
    if reply_to_post is None and quoted_post:
        reply_to_post = quoted_post
    elif reply_to_post is not None and quoted_post:
        post_url = MASTODON_INSTANCE + "@" + MASTODON_USER + "/" + str(quoted_post)
        post += "\n" + post_url
    media_ids = []
    # If post includes images, images are uploaded so that they can be included in the toot
    if images:
        for image in images:
            filename = image["filename"]
            alt = image["alt"]
            # If alt text was added to the image on bluesky, it's also added to the image on mastodon,
            # otherwise it will be uploaded without alt text.
            if alt:
                write_log("Uploading image " + filename + " with alt: " + alt + " to mastodon")
                res = mastodon.media_post(filename, description=alt)
            else:
                write_log("Uploading image " + filename)
                res = mastodon.media_post(filename)
            media_ids.append(res.id)
    # status_post accepts an empty media list and None reply id, so a single
    # call covers the text-only, reply and image cases.
    status = mastodon.status_post(post, in_reply_to_id=reply_to_post, media_ids=media_ids, visibility=visibility)
    write_log("Posted to mastodon")
    # Local renamed from "id" to avoid shadowing the builtin.
    return status["id"]
def retoot(toot_id):
    """Boost (reblog) the mastodon status with id *toot_id*."""
    mastodon.status_reblog(toot_id)
    write_log("Boosted toot " + str(toot_id))

179
output/post.py Normal file
View File

@@ -0,0 +1,179 @@
import random, string, urllib, arrow
from settings import settings
from settings.paths import *
from local.functions import write_log
from local.db import db_write
from output.twitter import tweet, retweet
from output.mastodon import toot, retoot
def post(posts, database, post_cache):
    """Crosspost gathered bluesky posts to twitter and mastodon.

    posts      -- dict of {cid: post-data} produced by the input module
    database   -- in-memory database of already crossposted posts
    post_cache -- {cid: timestamp} of posts sent within the last hour

    Returns (updates, database, post_cache); *updates* is True when the
    database changed and needs to be resaved.
    """
    # The updates status is set to false until anything has been altered in the database. If nothing has been posted in a run, we skip resaving the database.
    updates = False
    # Running through the posts dictionary reversed, to get oldest posts first.
    for cid in reversed(list(posts.keys())):
        post = posts[cid]
        # Checking if a maximum amount of posts per hour is set, and if so if it has been reached.
        if settings.max_per_hour != 0 and len(post_cache) >= settings.max_per_hour:
            write_log("Max posts per hour reached.")
            break
        # If a post is posted, we want to add a timestamp to the post_cache. Since there are several
        # reasons why a post might not be posted, we start out with this set to false for each post,
        # and change it to true if a post is actually sent.
        posted = False
        # Checking if the post is already in the database, and in that case getting the IDs for the post
        # on twitter and mastodon. If one or both of these IDs are empty, post will be sent.
        # Also checking the existing fail count against the max_retries set in settings, to avoid
        # retrying a failure so much that the poster gets ratelimited
        tweet_id = ""
        toot_id = ""
        t_fail = 0
        m_fail = 0
        if cid in database:
            tweet_id = database[cid]["ids"]["twitter_id"]
            toot_id = database[cid]["ids"]["mastodon_id"]
            t_fail = database[cid]["failed"]["twitter"]
            m_fail = database[cid]["failed"]["mastodon"]
            if m_fail >= settings.max_retries:
                write_log("Error limit reached, not posting to Mastodon", "error")
                if not toot_id:
                    updates = True
                    toot_id = "FailedToPost"
            if t_fail >= settings.max_retries:
                write_log("Error limit reached, not posting to Twitter", "error")
                if not tweet_id:
                    updates = True
                    tweet_id = "FailedToPost"
        text = post["text"]
        reply_to_post = post["reply_to_post"]
        quoted_post = post["quoted_post"]
        quote_url = post["quote_url"]
        images = post["images"]
        visibility = post["visibility"]
        allowed_reply = post["allowed_reply"]
        tweet_reply = ""
        toot_reply = ""
        tweet_quote = ""
        toot_quote = ""
        # If the post has already been sent to both twitter and mastodon and is not a repost, no
        # further action is needed.
        if tweet_id and toot_id and not post["repost"]:
            continue
        # If a retweet is found within the last hour, we check the cache to see if it has already been retweeted
        repost_timelimit = arrow.utcnow().shift(hours = -1)
        if cid in post_cache:
            repost_timelimit = post_cache[cid]
        # If it is a reply, we get the IDs of the posts we want to reply to from the database.
        # If post is not found in database, we can't continue the thread on mastodon and twitter,
        # and so we skip it.
        if reply_to_post in database:
            tweet_reply = database[reply_to_post]["ids"]["twitter_id"]
            toot_reply = database[reply_to_post]["ids"]["mastodon_id"]
        elif reply_to_post and reply_to_post not in database:
            write_log("Post " + cid + " was a reply to a post that is not in the database.", "error")
            continue
        # If post is a quote post we get the IDs of the posts we want to quote from the database.
        # If the posts are not found in the database we check if the quote_post setting is true or false in settings.
        # If true we add the URL of the bluesky post to the text of the post, if false we skip the post.
        if quoted_post in database:
            tweet_quote = database[quoted_post]["ids"]["twitter_id"]
            toot_quote = database[quoted_post]["ids"]["mastodon_id"]
        elif quoted_post and quoted_post not in database:
            if settings.quote_posts and quote_url not in text:
                text += "\n" + quote_url
            elif not settings.quote_posts:
                write_log("Post " + cid + " was a quote of a post that is not in the database.", "error")
                continue
        # In case the tweet or toot reply/quote variables are empty, we set them to None, to make sure they are in the correct format for
        # the api requests. This is not necessary for the toot_quote variable, as it is not sent as a parameter in itself anyway.
        if not tweet_reply:
            tweet_reply = None
        if not toot_reply:
            toot_reply = None
        if not tweet_quote:
            tweet_quote = None
        # If either tweet or toot has not previously been posted, we download images (given the post includes images).
        if images and (not tweet_id or not toot_id):
            images = get_images(images)
        # If twitter is set to false, the post is not sent to twitter.
        if not post["twitter"]:
            # BUG FIX: this branch previously assigned toot_id = "skipped",
            # which wrongly marked the mastodon post as already sent (and so
            # suppressed it) whenever twitter posting was disabled.
            tweet_id = "skipped"
            write_log("Not posting to Twitter because posting was set to false.")
        elif tweet_id and not post["repost"]:
            write_log("Post " + cid + " already sent to twitter.")
        # if the post already exists and is a repost, we check if it has already been reposted, and if not, repost it.
        elif tweet_id and post["repost"] and post["timestamp"] > repost_timelimit:
            try:
                # This is where retweets would go if they weren't locked behind a paywall.
                pass
                # retweet(tweet_id)
                # posted = True
            except Exception as error:
                write_log(error, "error")
        # Trying to post to twitter and mastodon. If posting fails the post ID for each service is set to an
        # empty string, letting the code know it should try again next time the code is run.
        elif not tweet_id and tweet_reply != "skipped" and tweet_reply != "FailedToPost":
            updates = True
            try:
                tweet_id = tweet(text, tweet_reply, tweet_quote, images, allowed_reply)
                posted = True
            except Exception as error:
                write_log(error, "error")
                t_fail += 1
                tweet_id = ""
                # If a tweet fails as a duplicate post, we don't want to try sending it again.
                if "duplicate content" in str(error):
                    t_fail = settings.max_retries
                    tweet_id = "duplicate"
        else:
            write_log("Not posting " + cid + " to Twitter")
        # If mastodon is set to false, the post is not sent to mastodon.
        if not post["mastodon"]:
            toot_id = "skipped"
            write_log("Not posting to Mastodon because posting was set to false.")
        elif toot_id and not post["repost"]:
            write_log("Post " + cid + " already sent to mastodon.")
        # if the post already exists and is a repost, we check if it has already been reposted, and if not, repost it.
        elif toot_id and post["repost"] and post["timestamp"] > repost_timelimit:
            try:
                retoot(toot_id)
                posted = True
            except Exception as error:
                write_log(error, "error")
        # Mastodon does not have a quote retweet function, so those will just be sent as replies.
        elif not toot_id and toot_reply != "skipped" and toot_reply != "FailedToPost":
            updates = True
            try:
                toot_id = toot(text, toot_reply, toot_quote, images, visibility)
                posted = True
            except Exception as error:
                write_log(error, "error")
                m_fail += 1
                toot_id = ""
        else:
            write_log("Not posting " + cid + " to Mastodon")
        # Saving post to database
        database = db_write(cid, tweet_id, toot_id, {"twitter": t_fail, "mastodon": m_fail}, database)
        if posted:
            post_cache[cid] = arrow.utcnow()
    return updates, database, post_cache
# Function for getting included images. If no images are included, an empty list will be returned,
# and the posting functions will know not to include any images.
def get_images(images):
    """Download each image to a local file and return [{"filename", "alt"}, ...]."""
    # The module header only does "import urllib", which does not guarantee
    # the request submodule is loaded; import it explicitly before use.
    import urllib.request
    local_images = []
    for image in images:
        # Getting alt text for image. If there is none this will be an empty string.
        alt = image["alt"]
        # Giving the image just a random filename
        filename = ''.join(random.choice(string.ascii_lowercase) for i in range(10)) + ".jpg"
        filename = image_path + filename
        # Downloading fullsize version of image
        urllib.request.urlretrieve(image["url"], filename)
        # Saving image info in a dictionary and adding it to the list.
        local_images.append({
            "filename": filename,
            "alt": alt
        })
    return local_images

91
output/twitter.py Normal file
View File

@@ -0,0 +1,91 @@
import tweepy
from settings import settings
from settings.auth import *
from local.functions import write_log
# Only create the twitter API clients when twitter crossposting is enabled in
# settings. Posting uses the v2 Client; media uploads still require the
# v1.1 API object (twitter_api).
if settings.Twitter:
    twitter_client = tweepy.Client(consumer_key=TWITTER_APP_KEY,
                                   consumer_secret=TWITTER_APP_SECRET,
                                   access_token=TWITTER_ACCESS_TOKEN,
                                   access_token_secret=TWITTER_ACCESS_TOKEN_SECRET)
    tweepy_auth = tweepy.OAuth1UserHandler(TWITTER_APP_KEY, TWITTER_APP_SECRET, TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
    twitter_api = tweepy.API(tweepy_auth)
# Function for posting tweets
def tweet(post, reply_to_post, quoted_post, images, allowed_reply):
    """Post *post* to twitter, splitting it into two tweets when too long.

    post          -- text of the tweet
    reply_to_post -- twitter id of the tweet this replies to, or None
    quoted_post   -- twitter id of the quoted tweet, or None
    images        -- list of {"filename": ..., "alt": ...} dicts to attach
    allowed_reply -- bluesky reply gate, mapped to twitter reply settings

    Returns the id of the (last) posted tweet, or "skipped" when the text
    could not be split to fit twitter's length limit.
    """
    media_ids = None
    reply_settings = set_reply_settings(allowed_reply)
    # If post includes images, images are uploaded so that they can be included in the tweet
    if images:
        media_ids = []
        for image in images:
            filename = image["filename"]
            alt = image["alt"]
            # Twitter caps alt text length; truncate with an ellipsis.
            if len(alt) > 1000:
                alt = alt[:996] + "..."
            res = twitter_api.media_upload(filename)
            # Local renamed from "id" to avoid shadowing the builtin.
            media_id = res.media_id
            # If alt text was added to the image on bluesky, it's also added to the image on twitter.
            if alt:
                write_log("Uploading image " + filename + " with alt: " + alt + " to twitter")
                twitter_api.create_media_metadata(media_id, alt)
            media_ids.append(media_id)
    # Checking if the post is longer than 280 characters, and if so sending to the
    # split_post-function.
    part_two = ""
    if len(post) > 280:
        post, part_two = split_post(post)
        # If the function does not return a post, splitting failed and we will skip this post.
        if not post:
            return "skipped"
    response = twitter_client.create_tweet(text=post, reply_settings=reply_settings, quote_tweet_id=quoted_post, in_reply_to_tweet_id=reply_to_post, media_ids=media_ids)
    write_log("Posted to twitter")
    tweet_id = response[0]["id"]
    if part_two:
        # Post the overflow as a reply to the first tweet and return its id,
        # so further replies thread onto the end.
        response = twitter_client.create_tweet(text=part_two, in_reply_to_tweet_id=tweet_id)
        tweet_id = response[0]["id"]
    return tweet_id
def retweet(tweet_id):
    """Retweet the tweet with id *tweet_id*."""
    # The API response was assigned to an unused local before; drop it.
    twitter_client.retweet(tweet_id)
    write_log("retweeted tweet " + str(tweet_id))
# Function for splitting up posts that are too long for twitter.
def split_post(text):
    """Split *text* into two parts that each fit twitter's 280 char limit.

    First tries to split on sentence boundaries, then on word boundaries.
    Returns (first, second); both are empty strings when no valid split
    could be found (the caller then skips the post).
    """
    write_log("Splitting post that is too long for twitter.")
    first = text
    # BUG FIX: initialize second so it is defined even when neither loop
    # below runs (e.g. a single over-long word without ". " or spaces),
    # which previously raised NameError at the final length check.
    second = ""
    # We first try to split the post into sentences, and send as many as can fit in the first one,
    # and the rest in the second.
    sentences = text.split(". ")
    i = 1
    while len(first) > 280 and i < len(sentences):
        first = ".".join(sentences[:(len(sentences) - i)]) + "."
        second = ".".join(sentences[(len(sentences) - i):])
        i += 1
    # If splitting by sentence does not result in a short enough post, we try splitting by words instead.
    if len(first) > 280:
        first = text
        words = text.split(" ")
        i = 1
        while len(first) > 280 and i < len(words):
            first = " ".join(words[:(len(words) - i)])
            second = " ".join(words[(len(words) - i):])
            i += 1
    # If splitting has ended up with either a first or second part that is too long, we return empty
    # strings and the post is not sent to twitter.
    if len(first) > 280 or len(second) > 280:
        write_log("Was not able to split post.", "error")
        first = ""
        second = ""
    return first, second
def set_reply_settings(allowed):
    """Map a bluesky reply gate to the matching twitter reply_settings value.

    Returns None when replies are open to everybody.
    """
    # Lookup table instead of an if/elif chain; unknown gates fall back to None.
    mapping = {
        "None": "mentionedUsers",
        "Mentioned": "mentionedUsers",
        "Following": "following",
    }
    return mapping.get(allowed)

1
poster/bin/python Symbolic link
View File

@@ -0,0 +1 @@
python3

1
poster/bin/python3 Symbolic link
View File

@@ -0,0 +1 @@
/bin/python3

View File

@@ -0,0 +1,5 @@
"""Run the EasyInstall command"""
if __name__ == '__main__':
from setuptools.command.easy_install import main
main()

View File

@@ -0,0 +1,562 @@
A_Rog <adam.thomas.rogerson@gmail.com>
Aakanksha Agrawal <11389424+rasponic@users.noreply.github.com>
Abhinav Sagar <40603139+abhinavsagar@users.noreply.github.com>
ABHYUDAY PRATAP SINGH <abhyudaypratap@outlook.com>
abs51295 <aagams68@gmail.com>
AceGentile <ventogrigio83@gmail.com>
Adam Chainz <adam@adamj.eu>
Adam Tse <adam.tse@me.com>
Adam Tse <atse@users.noreply.github.com>
Adam Wentz <awentz@theonion.com>
admin <admin@admins-MacBook-Pro.local>
Adrien Morison <adrien.morison@gmail.com>
ahayrapetyan <ahayrapetya2@bloomberg.net>
Ahilya <ahilya16009@iiitd.ac.in>
AinsworthK <yat626@yahoo.com.hk>
Akash Srivastava <akashsrivastava4927@gmail.com>
Alan Yee <alyee@ucsd.edu>
Albert Tugushev <albert@tugushev.ru>
Albert-Guan <albert.guan94@gmail.com>
albertg <albert.guan94@gmail.com>
Aleks Bunin <github@compuix.com>
Alethea Flowers <magicalgirl@google.com>
Alex Gaynor <alex.gaynor@gmail.com>
Alex Grönholm <alex.gronholm@nextday.fi>
Alex Loosley <a.loosley@reply.de>
Alex Morega <alex@grep.ro>
Alex Stachowiak <alexander@computer.org>
Alexander Shtyrov <rawzausho@gmail.com>
Alexandre Conrad <alexandre.conrad@gmail.com>
Alexey Popravka <a.popravka@smartweb.com.ua>
Alexey Popravka <alexey.popravka@horsedevel.com>
Alli <alzeih@users.noreply.github.com>
Ami Fischman <ami@fischman.org>
Ananya Maiti <ananyoevo@gmail.com>
Anatoly Techtonik <techtonik@gmail.com>
Anders Kaseorg <andersk@mit.edu>
Andreas Lutro <anlutro@gmail.com>
Andrei Geacar <andrei.geacar@gmail.com>
Andrew Gaul <andrew@gaul.org>
Andrey Bulgakov <mail@andreiko.ru>
Andrés Delfino <34587441+andresdelfino@users.noreply.github.com>
Andrés Delfino <adelfino@gmail.com>
Andy Freeland <andy.freeland@redjack.com>
Andy Freeland <andy@andyfreeland.net>
Andy Kluger <AndydeCleyre@users.noreply.github.com>
Ani Hayrapetyan <ahayrapetya2@bloomberg.net>
Aniruddha Basak <codewithaniruddha@gmail.com>
Anish Tambe <anish.tambe@yahoo.in>
Anrs Hu <anrs@douban.com>
Anthony Sottile <asottile@umich.edu>
Antoine Musso <hashar@free.fr>
Anton Ovchinnikov <revolver112@gmail.com>
Anton Patrushev <apatrushev@gmail.com>
Antonio Alvarado Hernandez <tnotstar@gmail.com>
Antony Lee <anntzer.lee@gmail.com>
Antti Kaihola <akaihol+github@ambitone.com>
Anubhav Patel <anubhavp28@gmail.com>
Anuj Godase <godaseanuj@gmail.com>
AQNOUCH Mohammed <aqnouch.mohammed@gmail.com>
AraHaan <seandhunt_7@yahoo.com>
Arindam Choudhury <arindam@live.com>
Armin Ronacher <armin.ronacher@active-4.com>
Artem <duketemon@users.noreply.github.com>
Ashley Manton <ajd.manton@googlemail.com>
Ashwin Ramaswami <aramaswamis@gmail.com>
atse <atse@users.noreply.github.com>
Atsushi Odagiri <aodagx@gmail.com>
Avner Cohen <israbirding@gmail.com>
Baptiste Mispelon <bmispelon@gmail.com>
Barney Gale <barney.gale@gmail.com>
barneygale <barney.gale@gmail.com>
Bartek Ogryczak <b.ogryczak@gmail.com>
Bastian Venthur <mail@venthur.de>
Ben Darnell <ben@bendarnell.com>
Ben Hoyt <benhoyt@gmail.com>
Ben Rosser <rosser.bjr@gmail.com>
Bence Nagy <bence@underyx.me>
Benjamin Peterson <benjamin@python.org>
Benjamin VanEvery <ben@simondata.com>
Benoit Pierre <benoit.pierre@gmail.com>
Berker Peksag <berker.peksag@gmail.com>
Bernardo B. Marques <bernardo.fire@gmail.com>
Bernhard M. Wiedemann <bwiedemann@suse.de>
Bertil Hatt <bertil.hatt@farfetch.com>
Bogdan Opanchuk <bogdan@opanchuk.net>
BorisZZZ <BorisZZZ@users.noreply.github.com>
Brad Erickson <eosrei@gmail.com>
Bradley Ayers <bradley.ayers@gmail.com>
Brandon L. Reiss <brandon@damyata.co>
Brandt Bucher <brandtbucher@gmail.com>
Brett Randall <javabrett@gmail.com>
Brian Cristante <33549821+brcrista@users.noreply.github.com>
Brian Cristante <brcrista@microsoft.com>
Brian Rosner <brosner@gmail.com>
BrownTruck <BrownTruck@users.noreply.github.com>
Bruno Oliveira <nicoddemus@gmail.com>
Bruno Renié <brutasse@gmail.com>
Bstrdsmkr <bstrdsmkr@gmail.com>
Buck Golemon <buck@yelp.com>
burrows <burrows@preveil.com>
Bussonnier Matthias <bussonniermatthias@gmail.com>
c22 <c22@users.noreply.github.com>
Caleb Martinez <accounts@calebmartinez.com>
Calvin Smith <eukaryote@users.noreply.github.com>
Carl Meyer <carl@oddbird.net>
Carlos Liam <carlos@aarzee.me>
Carol Willing <carolcode@willingconsulting.com>
Carter Thayer <carterwthayer@gmail.com>
Cass <cass.petrus@gmail.com>
Chandrasekhar Atina <chandu.atina@gmail.com>
Chih-Hsuan Yen <yan12125@gmail.com>
Chih-Hsuan Yen <yen@chyen.cc>
Chris Brinker <chris.brinker@gmail.com>
Chris Hunt <chrahunt@gmail.com>
Chris Jerdonek <chris.jerdonek@gmail.com>
Chris McDonough <chrism@plope.com>
Chris Wolfe <chriswwolfe@gmail.com>
Christian Heimes <christian@python.org>
Christian Oudard <christian.oudard@gmail.com>
Christopher Hunt <chrahunt@gmail.com>
Christopher Snyder <cnsnyder@users.noreply.github.com>
Clark Boylan <clark.boylan@gmail.com>
Clay McClure <clay@daemons.net>
Cody <Purring@users.noreply.github.com>
Cody Soyland <codysoyland@gmail.com>
Colin Watson <cjwatson@debian.org>
Connor Osborn <cdosborn@email.arizona.edu>
Cooper Lees <me@cooperlees.com>
Cooper Ry Lees <me@cooperlees.com>
Cory Benfield <lukasaoz@gmail.com>
Cory Wright <corywright@gmail.com>
Craig Kerstiens <craig.kerstiens@gmail.com>
Cristian Sorinel <cristian.sorinel@gmail.com>
Curtis Doty <Curtis@GreenKey.net>
cytolentino <ctolentino8@bloomberg.net>
Damian Quiroga <qdamian@gmail.com>
Dan Black <dyspop@gmail.com>
Dan Savilonis <djs@n-cube.org>
Dan Sully <daniel-github@electricrain.com>
daniel <mcdonaldd@unimelb.edu.au>
Daniel Collins <accounts@dac.io>
Daniel Hahler <git@thequod.de>
Daniel Holth <dholth@fastmail.fm>
Daniel Jost <torpedojost@gmail.com>
Daniel Shaulov <daniel.shaulov@gmail.com>
Daniele Esposti <expobrain@users.noreply.github.com>
Daniele Procida <daniele@vurt.org>
Danny Hermes <daniel.j.hermes@gmail.com>
Dav Clark <davclark@gmail.com>
Dave Abrahams <dave@boostpro.com>
Dave Jones <dave@waveform.org.uk>
David Aguilar <davvid@gmail.com>
David Black <db@d1b.org>
David Bordeynik <david.bordeynik@gmail.com>
David Bordeynik <david@zebra-med.com>
David Caro <david@dcaro.es>
David Evans <d@drhevans.com>
David Linke <dr.david.linke@gmail.com>
David Pursehouse <david.pursehouse@gmail.com>
David Tucker <david@tucker.name>
David Wales <daviewales@gmail.com>
Davidovich <david.genest@gmail.com>
derwolfe <chriswwolfe@gmail.com>
Desetude <harry@desetude.com>
Diego Caraballo <diegocaraballo84@gmail.com>
DiegoCaraballo <diegocaraballo84@gmail.com>
Dmitry Gladkov <dmitry.gladkov@gmail.com>
Domen Kožar <domen@dev.si>
Donald Stufft <donald@stufft.io>
Dongweiming <dongweiming@admaster.com.cn>
Douglas Thor <dougthor42@users.noreply.github.com>
DrFeathers <WilliamGeorgeBurgess@gmail.com>
Dustin Ingram <di@di.codes>
Dwayne Bailey <dwayne@translate.org.za>
Ed Morley <501702+edmorley@users.noreply.github.com>
Ed Morley <emorley@mozilla.com>
Eitan Adler <lists@eitanadler.com>
ekristina <panacejja@gmail.com>
elainechan <elaine.chan@outlook.com>
Eli Schwartz <eschwartz93@gmail.com>
Eli Schwartz <eschwartz@archlinux.org>
Emil Burzo <contact@emilburzo.com>
Emil Styrke <emil.styrke@gmail.com>
Endoh Takanao <djmchl@gmail.com>
enoch <lanxenet@gmail.com>
Erdinc Mutlu <erdinc_mutlu@yahoo.com>
Eric Gillingham <Gillingham@bikezen.net>
Eric Hanchrow <eric.hanchrow@gmail.com>
Eric Hopper <hopper@omnifarious.org>
Erik M. Bray <embray@stsci.edu>
Erik Rose <erik@mozilla.com>
Ernest W Durbin III <ewdurbin@gmail.com>
Ernest W. Durbin III <ewdurbin@gmail.com>
Erwin Janssen <erwinjanssen@outlook.com>
Eugene Vereshchagin <evvers@gmail.com>
everdimension <everdimension@gmail.com>
Felix Yan <felixonmars@archlinux.org>
fiber-space <fiber-space@users.noreply.github.com>
Filip Kokosiński <filip.kokosinski@gmail.com>
Florian Briand <ownerfrance+github@hotmail.com>
Florian Rathgeber <florian.rathgeber@gmail.com>
Francesco <f.guerrieri@gmail.com>
Francesco Montesano <franz.bergesund@gmail.com>
Frost Ming <mianghong@gmail.com>
Gabriel Curio <g.curio@gmail.com>
Gabriel de Perthuis <g2p.code@gmail.com>
Garry Polley <garrympolley@gmail.com>
gdanielson <graeme.danielson@gmail.com>
Geoffrey Lehée <geoffrey@lehee.name>
Geoffrey Sneddon <me@gsnedders.com>
George Song <george@55minutes.com>
Georgi Valkov <georgi.t.valkov@gmail.com>
Giftlin Rajaiah <giftlin.rgn@gmail.com>
gizmoguy1 <gizmoguy1@gmail.com>
gkdoc <40815324+gkdoc@users.noreply.github.com>
Gopinath M <31352222+mgopi1990@users.noreply.github.com>
GOTO Hayato <3532528+gh640@users.noreply.github.com>
gpiks <gaurav.pikale@gmail.com>
Guilherme Espada <porcariadagata@gmail.com>
Guy Rozendorn <guy@rzn.co.il>
gzpan123 <gzpan123@gmail.com>
Hanjun Kim <hallazzang@gmail.com>
Hari Charan <hcharan997@gmail.com>
Harsh Vardhan <harsh59v@gmail.com>
Herbert Pfennig <herbert@albinen.com>
Hsiaoming Yang <lepture@me.com>
Hugo <hugovk@users.noreply.github.com>
Hugo Lopes Tavares <hltbra@gmail.com>
Hugo van Kemenade <hugovk@users.noreply.github.com>
hugovk <hugovk@users.noreply.github.com>
Hynek Schlawack <hs@ox.cx>
Ian Bicking <ianb@colorstudy.com>
Ian Cordasco <graffatcolmingov@gmail.com>
Ian Lee <IanLee1521@gmail.com>
Ian Stapleton Cordasco <graffatcolmingov@gmail.com>
Ian Wienand <ian@wienand.org>
Ian Wienand <iwienand@redhat.com>
Igor Kuzmitshov <kuzmiigo@gmail.com>
Igor Sobreira <igor@igorsobreira.com>
Ilya Baryshev <baryshev@gmail.com>
INADA Naoki <songofacandy@gmail.com>
Ionel Cristian Mărieș <contact@ionelmc.ro>
Ionel Maries Cristian <ionel.mc@gmail.com>
Ivan Pozdeev <vano@mail.mipt.ru>
Jacob Kim <me@thejacobkim.com>
jakirkham <jakirkham@gmail.com>
Jakub Stasiak <kuba.stasiak@gmail.com>
Jakub Vysoky <jakub@borka.cz>
Jakub Wilk <jwilk@jwilk.net>
James Cleveland <jamescleveland@gmail.com>
James Cleveland <radiosilence@users.noreply.github.com>
James Firth <hello@james-firth.com>
James Polley <jp@jamezpolley.com>
Jan Pokorný <jpokorny@redhat.com>
Jannis Leidel <jannis@leidel.info>
jarondl <me@jarondl.net>
Jason R. Coombs <jaraco@jaraco.com>
Jay Graves <jay@skabber.com>
Jean-Christophe Fillion-Robin <jchris.fillionr@kitware.com>
Jeff Barber <jbarber@computer.org>
Jeff Dairiki <dairiki@dairiki.org>
Jelmer Vernooij <jelmer@jelmer.uk>
jenix21 <devfrog@gmail.com>
Jeremy Stanley <fungi@yuggoth.org>
Jeremy Zafran <jzafran@users.noreply.github.com>
Jiashuo Li <jiasli@microsoft.com>
Jim Garrison <jim@garrison.cc>
Jivan Amara <Development@JivanAmara.net>
John Paton <j.paton@catawiki.nl>
John-Scott Atlakson <john.scott.atlakson@gmail.com>
johnthagen <johnthagen@gmail.com>
johnthagen <johnthagen@users.noreply.github.com>
Jon Banafato <jon@jonafato.com>
Jon Dufresne <jon.dufresne@gmail.com>
Jon Parise <jon@indelible.org>
Jonas Nockert <jonasnockert@gmail.com>
Jonathan Herbert <foohyfooh@gmail.com>
Joost Molenaar <j.j.molenaar@gmail.com>
Jorge Niedbalski <niedbalski@gmail.com>
Joseph Long <jdl@fastmail.fm>
Josh Bronson <jabronson@gmail.com>
Josh Hansen <josh@skwash.net>
Josh Schneier <josh.schneier@gmail.com>
Juanjo Bazán <jjbazan@gmail.com>
Julian Berman <Julian@GrayVines.com>
Julian Gethmann <julian.gethmann@kit.edu>
Julien Demoor <julien@jdemoor.com>
jwg4 <jack.grahl@yahoo.co.uk>
Jyrki Pulliainen <jyrki@spotify.com>
Kai Chen <kaichen120@gmail.com>
Kamal Bin Mustafa <kamal@smach.net>
kaustav haldar <hi@kaustav.me>
keanemind <keanemind@gmail.com>
Keith Maxwell <keith.maxwell@gmail.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Kenneth Belitzky <kenny@belitzky.com>
Kenneth Reitz <me@kennethreitz.com>
Kenneth Reitz <me@kennethreitz.org>
Kevin Burke <kev@inburke.com>
Kevin Carter <kevin.carter@rackspace.com>
Kevin Frommelt <kevin.frommelt@webfilings.com>
Kevin R Patterson <kevin.r.patterson@intel.com>
Kexuan Sun <me@kianasun.com>
Kit Randel <kit@nocturne.net.nz>
kpinc <kop@meme.com>
Krishna Oza <krishoza15sep@gmail.com>
Kumar McMillan <kumar.mcmillan@gmail.com>
Kyle Persohn <kyle.persohn@gmail.com>
lakshmanaram <lakshmanaram.n@gmail.com>
Laszlo Kiss-Kollar <kiss.kollar.laszlo@gmail.com>
Laurent Bristiel <laurent@bristiel.com>
Laurie Opperman <laurie@sitesee.com.au>
Leon Sasson <leonsassonha@gmail.com>
Lev Givon <lev@columbia.edu>
Lincoln de Sousa <lincoln@comum.org>
Lipis <lipiridis@gmail.com>
Loren Carvalho <lcarvalho@linkedin.com>
Lucas Cimon <lucas.cimon@gmail.com>
Ludovic Gasc <gmludo@gmail.com>
Luke Macken <lmacken@redhat.com>
Luo Jiebin <luo.jiebin@qq.com>
luojiebin <luojiebin@users.noreply.github.com>
luz.paz <luzpaz@users.noreply.github.com>
László Kiss Kollár <lkisskollar@bloomberg.net>
László Kiss Kollár <lkollar@users.noreply.github.com>
Marc Abramowitz <marc@marc-abramowitz.com>
Marc Tamlyn <marc.tamlyn@gmail.com>
Marcus Smith <qwcode@gmail.com>
Mariatta <Mariatta@users.noreply.github.com>
Mark Kohler <mark.kohler@proteinsimple.com>
Mark Williams <markrwilliams@gmail.com>
Mark Williams <mrw@enotuniq.org>
Markus Hametner <fin+github@xbhd.org>
Masaki <mk5986@nyu.edu>
Masklinn <bitbucket.org@masklinn.net>
Matej Stuchlik <mstuchli@redhat.com>
Mathew Jennings <mjennings@foursquare.com>
Mathieu Bridon <bochecha@daitauha.fr>
Matt Good <matt@matt-good.net>
Matt Maker <trip@monstro.us>
Matt Robenolt <matt@ydekproductions.com>
matthew <matthew@trumbell.net>
Matthew Einhorn <moiein2000@gmail.com>
Matthew Gilliard <matthew.gilliard@gmail.com>
Matthew Iversen <teh.ivo@gmail.com>
Matthew Trumbell <matthew@thirdstonepartners.com>
Matthew Willson <matthew@swiftkey.com>
Matthias Bussonnier <bussonniermatthias@gmail.com>
mattip <matti.picus@gmail.com>
Maxim Kurnikov <maxim.kurnikov@gmail.com>
Maxime Rouyrre <rouyrre+git@gmail.com>
mayeut <mayeut@users.noreply.github.com>
mbaluna <44498973+mbaluna@users.noreply.github.com>
mdebi <17590103+mdebi@users.noreply.github.com>
memoselyk <memoselyk@gmail.com>
Michael <michael-k@users.noreply.github.com>
Michael Aquilina <michaelaquilina@gmail.com>
Michael E. Karpeles <michael.karpeles@gmail.com>
Michael Klich <michal@michalklich.com>
Michael Williamson <mike@zwobble.org>
michaelpacer <michaelpacer@gmail.com>
Mickaël Schoentgen <mschoentgen@nuxeo.com>
Miguel Araujo Perez <miguel.araujo.perez@gmail.com>
Mihir Singh <git.service@mihirsingh.com>
Mike <mikeh@blur.com>
Mike Hendricks <mikeh@blur.com>
Min RK <benjaminrk@gmail.com>
MinRK <benjaminrk@gmail.com>
Miro Hrončok <miro@hroncok.cz>
Monica Baluna <mbaluna@bloomberg.net>
montefra <franz.bergesund@gmail.com>
Monty Taylor <mordred@inaugust.com>
Nate Coraor <nate@bx.psu.edu>
Nathaniel J. Smith <njs@pobox.com>
Nehal J Wani <nehaljw.kkd1@gmail.com>
Neil Botelho <neil.botelho321@gmail.com>
Nick Coghlan <ncoghlan@gmail.com>
Nick Stenning <nick@whiteink.com>
Nick Timkovich <prometheus235@gmail.com>
Nicolas Bock <nicolasbock@gmail.com>
Nikhil Benesch <nikhil.benesch@gmail.com>
Nitesh Sharma <nbsharma@outlook.com>
Nowell Strite <nowell@strite.org>
NtaleGrey <Shadikntale@gmail.com>
nvdv <modestdev@gmail.com>
Ofekmeister <ofekmeister@gmail.com>
ofrinevo <ofrine@gmail.com>
Oliver Jeeves <oliver.jeeves@ocado.com>
Oliver Tonnhofer <olt@bogosoft.com>
Olivier Girardot <ssaboum@gmail.com>
Olivier Grisel <olivier.grisel@ensta.org>
Ollie Rutherfurd <orutherfurd@gmail.com>
OMOTO Kenji <k-omoto@m3.com>
Omry Yadan <omry@fb.com>
Oren Held <orenhe@il.ibm.com>
Oscar Benjamin <oscar.j.benjamin@gmail.com>
Oz N Tiram <oz.tiram@gmail.com>
Pachwenko <32424503+Pachwenko@users.noreply.github.com>
Patrick Dubroy <pdubroy@gmail.com>
Patrick Jenkins <patrick@socialgrowthtechnologies.com>
Patrick Lawson <pl@foursquare.com>
patricktokeeffe <patricktokeeffe@users.noreply.github.com>
Patrik Kopkan <pkopkan@redhat.com>
Paul Kehrer <paul.l.kehrer@gmail.com>
Paul Moore <p.f.moore@gmail.com>
Paul Nasrat <pnasrat@gmail.com>
Paul Oswald <pauloswald@gmail.com>
Paul van der Linden <mail@paultjuh.org>
Paulus Schoutsen <paulus@paulusschoutsen.nl>
Pavithra Eswaramoorthy <33131404+QueenCoffee@users.noreply.github.com>
Pawel Jasinski <pawel.jasinski@gmail.com>
Pekka Klärck <peke@iki.fi>
Peter Lisák <peter.lisak@showmax.com>
Peter Waller <peter.waller@gmail.com>
petr-tik <petr-tik@users.noreply.github.com>
Phaneendra Chiruvella <hi@pcx.io>
Phil Freo <phil@philfreo.com>
Phil Pennock <phil@pennock-tech.com>
Phil Whelan <phil123@gmail.com>
Philip Jägenstedt <philip@foolip.org>
Philip Molloy <pamolloy@users.noreply.github.com>
Philippe Ombredanne <pombredanne@gmail.com>
Pi Delport <pjdelport@gmail.com>
Pierre-Yves Rofes <github@rofes.fr>
pip <pypa-dev@googlegroups.com>
Prabakaran Kumaresshan <k_prabakaran+github@hotmail.com>
Prabhjyotsing Surjit Singh Sodhi <psinghsodhi@bloomberg.net>
Prabhu Marappan <prabhum.794@gmail.com>
Pradyun Gedam <pradyunsg@gmail.com>
Pratik Mallya <mallya@us.ibm.com>
Preet Thakkar <preet.thakkar@students.iiit.ac.in>
Preston Holmes <preston@ptone.com>
Przemek Wrzos <hetmankp@none>
Pulkit Goyal <7895pulkit@gmail.com>
Qiangning Hong <hongqn@gmail.com>
Quentin Pradet <quentin.pradet@gmail.com>
R. David Murray <rdmurray@bitdance.com>
Rafael Caricio <rafael.jacinto@gmail.com>
Ralf Schmitt <ralf@systemexit.de>
Razzi Abuissa <razzi53@gmail.com>
rdb <rdb@users.noreply.github.com>
Remi Rampin <r@remirampin.com>
Remi Rampin <remirampin@gmail.com>
Rene Dudfield <renesd@gmail.com>
Riccardo Magliocchetti <riccardo.magliocchetti@gmail.com>
Richard Jones <r1chardj0n3s@gmail.com>
RobberPhex <robberphex@gmail.com>
Robert Collins <rbtcollins@hp.com>
Robert McGibbon <rmcgibbo@gmail.com>
Robert T. McGibbon <rmcgibbo@gmail.com>
robin elisha robinson <elisha.rob@gmail.com>
Roey Berman <roey.berman@gmail.com>
Rohan Jain <crodjer@gmail.com>
Rohan Jain <crodjer@users.noreply.github.com>
Rohan Jain <mail@rohanjain.in>
Roman Bogorodskiy <roman.bogorodskiy@ericsson.com>
Romuald Brunet <romuald@chivil.com>
Ronny Pfannschmidt <Ronny.Pfannschmidt@gmx.de>
Rory McCann <rory@technomancy.org>
Ross Brattain <ross.b.brattain@intel.com>
Roy Wellington Ⅳ <cactus_hugged@yahoo.com>
Roy Wellington Ⅳ <roy@mybasis.com>
Ryan Wooden <rygwdn@gmail.com>
ryneeverett <ryneeverett@gmail.com>
Sachi King <nakato@nakato.io>
Salvatore Rinchiera <salvatore@rinchiera.com>
Savio Jomton <sajo240519@gmail.com>
schlamar <marc.schlaich@gmail.com>
Scott Kitterman <sklist@kitterman.com>
Sean <me@sean.taipei>
seanj <seanj@xyke.com>
Sebastian Jordan <sebastian.jordan.mail@googlemail.com>
Sebastian Schaetz <sschaetz@butterflynetinc.com>
Segev Finer <segev208@gmail.com>
SeongSoo Cho <ppiyakk2@printf.kr>
Sergey Vasilyev <nolar@nolar.info>
Seth Woodworth <seth@sethish.com>
Shlomi Fish <shlomif@shlomifish.org>
Shovan Maity <shovan.maity@mayadata.io>
Simeon Visser <svisser@users.noreply.github.com>
Simon Cross <hodgestar@gmail.com>
Simon Pichugin <simon.pichugin@gmail.com>
sinoroc <sinoroc.code+git@gmail.com>
Sorin Sbarnea <sorin.sbarnea@gmail.com>
Stavros Korokithakis <stavros@korokithakis.net>
Stefan Scherfke <stefan@sofa-rockers.org>
Stephan Erb <github@stephanerb.eu>
stepshal <nessento@openmailbox.org>
Steve (Gadget) Barnes <gadgetsteve@hotmail.com>
Steve Barnes <gadgetsteve@hotmail.com>
Steve Dower <steve.dower@microsoft.com>
Steve Kowalik <steven@wedontsleep.org>
Steven Myint <git@stevenmyint.com>
stonebig <stonebig34@gmail.com>
Stéphane Bidoul (ACSONE) <stephane.bidoul@acsone.eu>
Stéphane Bidoul <stephane.bidoul@acsone.eu>
Stéphane Klein <contact@stephane-klein.info>
Sumana Harihareswara <sh@changeset.nyc>
Sviatoslav Sydorenko <wk.cvs.github@sydorenko.org.ua>
Sviatoslav Sydorenko <wk@sydorenko.org.ua>
Swat009 <swatantra.kumar8@gmail.com>
Takayuki SHIMIZUKAWA <shimizukawa@gmail.com>
tbeswick <tbeswick@enphaseenergy.com>
Thijs Triemstra <info@collab.nl>
Thomas Fenzl <thomas.fenzl@gmail.com>
Thomas Grainger <tagrain@gmail.com>
Thomas Guettler <tguettler@tbz-pariv.de>
Thomas Johansson <devnull@localhost>
Thomas Kluyver <thomas@kluyver.me.uk>
Thomas Smith <smithtg@ncbi.nlm.nih.gov>
Tim D. Smith <github@tim-smith.us>
Tim Gates <tim.gates@iress.com>
Tim Harder <radhermit@gmail.com>
Tim Heap <tim@timheap.me>
tim smith <github@tim-smith.us>
tinruufu <tinruufu@gmail.com>
Tom Forbes <tom@tomforb.es>
Tom Freudenheim <tom.freudenheim@onepeloton.com>
Tom V <tom@viner.tv>
Tomas Orsava <torsava@redhat.com>
Tomer Chachamu <tomer.chachamu@gmail.com>
Tony Beswick <tonybeswick@orcon.net.nz>
Tony Zhaocheng Tan <tony@tonytan.io>
TonyBeswick <TonyBeswick@users.noreply.github.com>
toonarmycaptain <toonarmycaptain@hotmail.com>
Toshio Kuratomi <toshio@fedoraproject.org>
Travis Swicegood <development@domain51.com>
Tzu-ping Chung <uranusjr@gmail.com>
Valentin Haenel <valentin.haenel@gmx.de>
Victor Stinner <victor.stinner@gmail.com>
victorvpaulo <victorvpaulo@gmail.com>
Viktor Szépe <viktor@szepe.net>
Ville Skyttä <ville.skytta@iki.fi>
Vinay Sajip <vinay_sajip@yahoo.co.uk>
Vincent Philippon <sindaewoh@gmail.com>
Vinicyus Macedo <7549205+vinicyusmacedo@users.noreply.github.com>
Vitaly Babiy <vbabiy86@gmail.com>
Vladimir Rutsky <rutsky@users.noreply.github.com>
W. Trevor King <wking@drexel.edu>
Wil Tan <wil@dready.org>
Wilfred Hughes <me@wilfred.me.uk>
William ML Leslie <william.leslie.ttg@gmail.com>
William T Olson <trevor@heytrevor.com>
Wilson Mo <wilsonfv@126.com>
wim glenn <wim.glenn@gmail.com>
Wolfgang Maier <wolfgang.maier@biologie.uni-freiburg.de>
Xavier Fernandez <xav.fernandez@gmail.com>
Xavier Fernandez <xavier.fernandez@polyconseil.fr>
xoviat <xoviat@users.noreply.github.com>
xtreak <tir.karthi@gmail.com>
YAMAMOTO Takashi <yamamoto@midokura.com>
Yen Chi Hsuan <yan12125@gmail.com>
Yeray Diaz Diaz <yeraydiazdiaz@gmail.com>
Yoval P <yoval@gmx.com>
Yu Jian <askingyj@gmail.com>
Yuan Jing Vincent Yan <yyan82@bloomberg.net>
Zearin <zearin@gonk.net>
Zearin <Zearin@users.noreply.github.com>
Zhiping Deng <kofreestyler@gmail.com>
Zvezdan Petkovic <zpetkovic@acm.org>
Łukasz Langa <lukasz@langa.pl>
Семён Марьясин <simeon@maryasin.name>

View File

@@ -0,0 +1,20 @@
Copyright (c) 2008-2019 The pip developers (see AUTHORS.txt file)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,82 @@
Metadata-Version: 2.1
Name: setuptools
Version: 44.0.0
Summary: Easily download, build, install, upgrade, and uninstall Python packages
Home-page: https://github.com/pypa/setuptools
Author: Python Packaging Authority
Author-email: distutils-sig@python.org
License: UNKNOWN
Project-URL: Documentation, https://setuptools.readthedocs.io/
Keywords: CPAN PyPI distutils eggs package management
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: System :: Archiving :: Packaging
Classifier: Topic :: System :: Systems Administration
Classifier: Topic :: Utilities
Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7
Description-Content-Type: text/x-rst; charset=UTF-8
.. image:: https://img.shields.io/pypi/v/setuptools.svg
:target: https://pypi.org/project/setuptools
.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg
:target: https://setuptools.readthedocs.io
.. image:: https://img.shields.io/travis/pypa/setuptools/master.svg?label=Linux%20CI&logo=travis&logoColor=white
:target: https://travis-ci.org/pypa/setuptools
.. image:: https://img.shields.io/appveyor/ci/pypa/setuptools/master.svg?label=Windows%20CI&logo=appveyor&logoColor=white
:target: https://ci.appveyor.com/project/pypa/setuptools/branch/master
.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
:target: https://codecov.io/gh/pypa/setuptools
.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat
:target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme
.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
See the `Installation Instructions
<https://packaging.python.org/installing/>`_ in the Python Packaging
User's Guide for instructions on installing, upgrading, and uninstalling
Setuptools.
Questions and comments should be directed to the `distutils-sig
mailing list <http://mail.python.org/pipermail/distutils-sig/>`_.
Bug reports and especially tested patches may be
submitted directly to the `bug tracker
<https://github.com/pypa/setuptools/issues>`_.
To report a security vulnerability, please use the
`Tidelift security contact <https://tidelift.com/security>`_.
Tidelift will coordinate the fix and disclosure.
For Enterprise
==============
Available as part of the Tidelift Subscription.
Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
`Learn more <https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=referral&utm_campaign=github>`_.
Code of Conduct
===============
Everyone interacting in the setuptools project's codebases, issue trackers,
chat rooms, and mailing lists is expected to follow the
`PyPA Code of Conduct <https://www.pypa.io/en/latest/code-of-conduct/>`_.

View File

@@ -0,0 +1,89 @@
easy_install.py,sha256=MDC9vt5AxDsXX5qcKlBz2TnW6Tpuv_AobnfhCJ9X3PM,126
setuptools/__init__.py,sha256=WBpCcn2lvdckotabeae1TTYonPOcgCIF3raD2zRWzBc,7283
setuptools/_deprecation_warning.py,sha256=jU9-dtfv6cKmtQJOXN8nP1mm7gONw5kKEtiPtbwnZyI,218
setuptools/_imp.py,sha256=jloslOkxrTKbobgemfP94YII0nhqiJzE1bRmCTZ1a5I,2223
setuptools/archive_util.py,sha256=kw8Ib_lKjCcnPKNbS7h8HztRVK0d5RacU3r_KRdVnmM,6592
setuptools/build_meta.py,sha256=-9Nmj9YdbW4zX3TssPJZhsENrTa4fw3k86Jm1cdKMik,9597
setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536
setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752
setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536
setuptools/config.py,sha256=6SB2OY3qcooOJmG_rsK_s0pKBsorBlDpfMJUyzjQIGk,20575
setuptools/dep_util.py,sha256=fgixvC1R7sH3r13ktyf7N0FALoqEXL1cBarmNpSEoWg,935
setuptools/depends.py,sha256=qt2RWllArRvhnm8lxsyRpcthEZYp4GHQgREl1q0LkFw,5517
setuptools/dist.py,sha256=xtXaNsOsE32MwwQqErzgXJF7jsTQz9GYFRrwnPFQ0J0,49865
setuptools/errors.py,sha256=MVOcv381HNSajDgEUWzOQ4J6B5BHCBMSjHfaWcEwA1o,524
setuptools/extension.py,sha256=uc6nHI-MxwmNCNPbUiBnybSyqhpJqjbhvOQ-emdvt_E,1729
setuptools/glob.py,sha256=o75cHrOxYsvn854thSxE0x9k8JrKDuhP_rRXlVB00Q4,5084
setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536
setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264
setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536
setuptools/installer.py,sha256=TCFRonRo01I79zo-ucf3Ymhj8TenPlmhMijN916aaJs,5337
setuptools/launch.py,sha256=sd7ejwhBocCDx_wG9rIs0OaZ8HtmmFU8ZC6IR_S0Lvg,787
setuptools/lib2to3_ex.py,sha256=t5e12hbR2pi9V4ezWDTB4JM-AISUnGOkmcnYHek3xjg,2013
setuptools/monkey.py,sha256=FGc9fffh7gAxMLFmJs2DW_OYWpBjkdbNS2n14UAK4NA,5264
setuptools/msvc.py,sha256=8baJ6aYgCA4TRdWQQi185qB9dnU8FaP4wgpbmd7VODs,46751
setuptools/namespaces.py,sha256=F0Nrbv8KCT2OrO7rwa03om4N4GZKAlnce-rr-cgDQa8,3199
setuptools/package_index.py,sha256=rqhmbFUEf4WxndnKbtWmj_x8WCuZSuoCgA0K1syyCY8,40616
setuptools/py27compat.py,sha256=tvmer0Tn-wk_JummCkoM22UIjpjL-AQ8uUiOaqTs8sI,1496
setuptools/py31compat.py,sha256=h2rtZghOfwoGYd8sQ0-auaKiF3TcL3qX0bX3VessqcE,838
setuptools/py33compat.py,sha256=SMF9Z8wnGicTOkU1uRNwZ_kz5Z_bj29PUBbqdqeeNsc,1330
setuptools/py34compat.py,sha256=KYOd6ybRxjBW8NJmYD8t_UyyVmysppFXqHpFLdslGXU,245
setuptools/sandbox.py,sha256=9UbwfEL5QY436oMI1LtFWohhoZ-UzwHvGyZjUH_qhkw,14276
setuptools/script (dev).tmpl,sha256=RUzQzCQUaXtwdLtYHWYbIQmOaES5Brqq1FvUA_tu-5I,218
setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138
setuptools/site-patch.py,sha256=OumkIHMuoSenRSW1382kKWI1VAwxNE86E5W8iDd34FY,2302
setuptools/ssl_support.py,sha256=nLjPUBBw7RTTx6O4RJZ5eAMGgjJG8beiDbkFXDZpLuM,8493
setuptools/unicode_utils.py,sha256=NOiZ_5hD72A6w-4wVj8awHFM3n51Kmw1Ic_vx15XFqw,996
setuptools/version.py,sha256=og_cuZQb0QI6ukKZFfZWPlr1HgJBPPn2vO2m_bI9ZTE,144
setuptools/wheel.py,sha256=zct-SEj5_LoHg6XELt2cVRdulsUENenCdS1ekM7TlZA,8455
setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714
setuptools/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
setuptools/_vendor/ordered_set.py,sha256=dbaCcs27dyN9gnMWGF5nA_BrVn6Q-NrjKYJpV9_fgBs,15130
setuptools/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055
setuptools/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098
setuptools/_vendor/packaging/__about__.py,sha256=CpuMSyh1V7adw8QMjWKkY3LtdqRUkRX4MgJ6nF4stM0,744
setuptools/_vendor/packaging/__init__.py,sha256=6enbp5XgRfjBjsI9-bn00HjHf5TH21PDMOKkJW8xw-w,562
setuptools/_vendor/packaging/_compat.py,sha256=Ugdm-qcneSchW25JrtMIKgUxfEEBcCAz6WrEeXeqz9o,865
setuptools/_vendor/packaging/_structures.py,sha256=pVd90XcXRGwpZRB_qdFuVEibhCHpX_bL5zYr9-N0mc8,1416
setuptools/_vendor/packaging/markers.py,sha256=-meFl9Fr9V8rF5Rduzgett5EHK9wBYRUqssAV2pj0lw,8268
setuptools/_vendor/packaging/requirements.py,sha256=3dwIJekt8RRGCUbgxX8reeAbgmZYjb0wcCRtmH63kxI,4742
setuptools/_vendor/packaging/specifiers.py,sha256=0ZzQpcUnvrQ6LjR-mQRLzMr8G6hdRv-mY0VSf_amFtI,27778
setuptools/_vendor/packaging/tags.py,sha256=EPLXhO6GTD7_oiWEO1U0l0PkfR8R_xivpMDHXnsTlts,12933
setuptools/_vendor/packaging/utils.py,sha256=VaTC0Ei7zO2xl9ARiWmz2YFLFt89PuuhLbAlXMyAGms,1520
setuptools/_vendor/packaging/version.py,sha256=Npdwnb8OHedj_2L86yiUqscujb7w_i5gmSK1PhOAFzg,11978
setuptools/command/__init__.py,sha256=QCAuA9whnq8Bnoc0bBaS6Lw_KAUO0DiHYZQXEMNn5hg,568
setuptools/command/alias.py,sha256=KjpE0sz_SDIHv3fpZcIQK-sCkJz-SrC6Gmug6b9Nkc8,2426
setuptools/command/bdist_egg.py,sha256=nnfV8Ah8IRC_Ifv5Loa9FdxL66MVbyDXwy-foP810zM,18185
setuptools/command/bdist_rpm.py,sha256=B7l0TnzCGb-0nLlm6rS00jWLkojASwVmdhW2w5Qz_Ak,1508
setuptools/command/bdist_wininst.py,sha256=_6dz3lpB1tY200LxKPLM7qgwTCceOMgaWFF-jW2-pm0,637
setuptools/command/build_clib.py,sha256=bQ9aBr-5ZSO-9fGsGsDLz0mnnFteHUZnftVLkhvHDq0,4484
setuptools/command/build_ext.py,sha256=Ib42YUGksBswm2mL5xmQPF6NeTA6HcqrvAtEgFCv32A,13019
setuptools/command/build_py.py,sha256=yWyYaaS9F3o9JbIczn064A5g1C5_UiKRDxGaTqYbtLE,9596
setuptools/command/develop.py,sha256=MQlnGS6uP19erK2JCNOyQYoYyquk3PADrqrrinqqLtA,8184
setuptools/command/dist_info.py,sha256=5t6kOfrdgALT-P3ogss6PF9k-Leyesueycuk3dUyZnI,960
setuptools/command/easy_install.py,sha256=0lY8Agxe-7IgMtxgxFuOY1NrDlBzOUlpCKsvayXlTYY,89903
setuptools/command/egg_info.py,sha256=0e_TXrMfpa8nGTO7GmJcmpPCMWzliZi6zt9aMchlumc,25578
setuptools/command/install.py,sha256=8doMxeQEDoK4Eco0mO2WlXXzzp9QnsGJQ7Z7yWkZPG8,4705
setuptools/command/install_egg_info.py,sha256=4zq_Ad3jE-EffParuyDEnvxU6efB-Xhrzdr8aB6Ln_8,3195
setuptools/command/install_lib.py,sha256=9zdc-H5h6RPxjySRhOwi30E_WfcVva7gpfhZ5ata60w,5023
setuptools/command/install_scripts.py,sha256=UD0rEZ6861mTYhIdzcsqKnUl8PozocXWl9VBQ1VTWnc,2439
setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628
setuptools/command/py36compat.py,sha256=SzjZcOxF7zdFUT47Zv2n7AM3H8koDys_0OpS-n9gIfc,4986
setuptools/command/register.py,sha256=kk3DxXCb5lXTvqnhfwx2g6q7iwbUmgTyXUCaBooBOUk,468
setuptools/command/rotate.py,sha256=co5C1EkI7P0GGT6Tqz-T2SIj2LBJTZXYELpmao6d4KQ,2164
setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658
setuptools/command/sdist.py,sha256=IL1LepD2h8qGKOFJ3rrQVbjNH_Q6ViD40l0QADr4MEU,8088
setuptools/command/setopt.py,sha256=NTWDyx-gjDF-txf4dO577s7LOzHVoKR0Mq33rFxaRr8,5085
setuptools/command/test.py,sha256=u2kXngIIdSYqtvwFlHiN6Iye1IB4TU6uadB2uiV1szw,9602
setuptools/command/upload.py,sha256=XT3YFVfYPAmA5qhGg0euluU98ftxRUW-PzKcODMLxUs,462
setuptools/command/upload_docs.py,sha256=oXiGplM_cUKLwE4CWWw98RzCufAu8tBhMC97GegFcms,7311
setuptools/extern/__init__.py,sha256=4q9gtShB1XFP6CisltsyPqtcfTO6ZM9Lu1QBl3l-qmo,2514
setuptools-44.0.0.dist-info/AUTHORS.txt,sha256=RtqU9KfonVGhI48DAA4-yTOBUhBtQTjFhaDzHoyh7uU,21518
setuptools-44.0.0.dist-info/LICENSE.txt,sha256=W6Ifuwlk-TatfRU2LR7W1JMcyMj5_y1NkRkOEJvnRDE,1090
setuptools-44.0.0.dist-info/METADATA,sha256=L93fcafgVw4xoJUNG0lehyy0prVj-jU_JFxRh0ZUtos,3523
setuptools-44.0.0.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
setuptools-44.0.0.dist-info/dependency_links.txt,sha256=HlkCFkoK5TbZ5EMLbLKYhLcY_E31kBWD8TqW2EgmatQ,239
setuptools-44.0.0.dist-info/entry_points.txt,sha256=ZmIqlp-SBdsBS2cuetmU2NdSOs4DG0kxctUR9UJ8Xk0,3150
setuptools-44.0.0.dist-info/top_level.txt,sha256=2HUXVVwA4Pff1xgTFr3GsTXXKaPaO6vlG6oNJ_4u4Tg,38
setuptools-44.0.0.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
setuptools-44.0.0.dist-info/RECORD,,

View File

@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.2)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

View File

@@ -0,0 +1,2 @@
https://files.pythonhosted.org/packages/source/c/certifi/certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d
https://files.pythonhosted.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2

View File

@@ -0,0 +1,68 @@
[console_scripts]
easy_install = setuptools.command.easy_install:main
[distutils.commands]
alias = setuptools.command.alias:alias
bdist_egg = setuptools.command.bdist_egg:bdist_egg
bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm
bdist_wininst = setuptools.command.bdist_wininst:bdist_wininst
build_clib = setuptools.command.build_clib:build_clib
build_ext = setuptools.command.build_ext:build_ext
build_py = setuptools.command.build_py:build_py
develop = setuptools.command.develop:develop
dist_info = setuptools.command.dist_info:dist_info
easy_install = setuptools.command.easy_install:easy_install
egg_info = setuptools.command.egg_info:egg_info
install = setuptools.command.install:install
install_egg_info = setuptools.command.install_egg_info:install_egg_info
install_lib = setuptools.command.install_lib:install_lib
install_scripts = setuptools.command.install_scripts:install_scripts
rotate = setuptools.command.rotate:rotate
saveopts = setuptools.command.saveopts:saveopts
sdist = setuptools.command.sdist:sdist
setopt = setuptools.command.setopt:setopt
test = setuptools.command.test:test
upload_docs = setuptools.command.upload_docs:upload_docs
[distutils.setup_keywords]
convert_2to3_doctests = setuptools.dist:assert_string_list
dependency_links = setuptools.dist:assert_string_list
eager_resources = setuptools.dist:assert_string_list
entry_points = setuptools.dist:check_entry_points
exclude_package_data = setuptools.dist:check_package_data
extras_require = setuptools.dist:check_extras
include_package_data = setuptools.dist:assert_bool
install_requires = setuptools.dist:check_requirements
namespace_packages = setuptools.dist:check_nsp
package_data = setuptools.dist:check_package_data
packages = setuptools.dist:check_packages
python_requires = setuptools.dist:check_specifier
setup_requires = setuptools.dist:check_requirements
test_loader = setuptools.dist:check_importable
test_runner = setuptools.dist:check_importable
test_suite = setuptools.dist:check_test_suite
tests_require = setuptools.dist:check_requirements
use_2to3 = setuptools.dist:assert_bool
use_2to3_exclude_fixers = setuptools.dist:assert_string_list
use_2to3_fixers = setuptools.dist:assert_string_list
zip_safe = setuptools.dist:assert_bool
[egg_info.writers]
PKG-INFO = setuptools.command.egg_info:write_pkg_info
dependency_links.txt = setuptools.command.egg_info:overwrite_arg
depends.txt = setuptools.command.egg_info:warn_depends_obsolete
eager_resources.txt = setuptools.command.egg_info:overwrite_arg
entry_points.txt = setuptools.command.egg_info:write_entries
namespace_packages.txt = setuptools.command.egg_info:overwrite_arg
requires.txt = setuptools.command.egg_info:write_requirements
top_level.txt = setuptools.command.egg_info:write_toplevel_names
[setuptools.finalize_distribution_options]
2to3_doctests = setuptools.dist:Distribution._finalize_2to3_doctests
features = setuptools.dist:Distribution._finalize_feature_opts
keywords = setuptools.dist:Distribution._finalize_setup_keywords
parent_finalize = setuptools.dist:_Distribution.finalize_options
[setuptools.installation]
eggsecutable = setuptools.command.easy_install:bootstrap

View File

@@ -0,0 +1,3 @@
easy_install
pkg_resources
setuptools

View File

@@ -0,0 +1,7 @@
class SetuptoolsDeprecationWarning(Warning):
    """
    Base class for warning deprecations in ``setuptools``.

    This class is not derived from ``DeprecationWarning``, and as such is
    visible by default (``DeprecationWarning`` is hidden by the default
    warning filters, so deriving from it would silence these warnings).
    """

View File

@@ -0,0 +1,73 @@
"""
Re-implementation of find_module and get_frozen_object
from the deprecated imp module.
"""
import os
import importlib.util
import importlib.machinery
from .py34compat import module_from_spec
# Module-kind constants mirroring those of the deprecated 'imp' module.
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
C_BUILTIN = 6
PY_FROZEN = 7


def find_module(module, paths=None):
    """Just like 'imp.find_module()', but with package support"""
    spec = importlib.util.find_spec(module, paths)
    if spec is None:
        raise ImportError("Can't find %s" % module)

    if not spec.has_location and hasattr(spec, 'submodule_search_locations'):
        # Location-less package: substitute a spec for its __init__.
        spec = importlib.util.spec_from_loader('__init__.py', spec.loader)

    def _loader_is(importer_cls):
        # True when the loader is itself a class deriving from importer_cls.
        return isinstance(spec.loader, type) and issubclass(
            spec.loader, importer_cls)

    file = None
    kind = -1
    if spec.origin == 'frozen' or _loader_is(
            importlib.machinery.FrozenImporter):
        kind, path, suffix, mode = PY_FROZEN, None, '', ''  # imp compatibility
    elif spec.origin == 'built-in' or _loader_is(
            importlib.machinery.BuiltinImporter):
        kind, path, suffix, mode = C_BUILTIN, None, '', ''  # imp compatibility
    elif spec.has_location:
        path = spec.origin
        suffix = os.path.splitext(path)[1]
        is_source = suffix in importlib.machinery.SOURCE_SUFFIXES
        mode = 'r' if is_source else 'rb'

        if is_source:
            kind = PY_SOURCE
        elif suffix in importlib.machinery.BYTECODE_SUFFIXES:
            kind = PY_COMPILED
        elif suffix in importlib.machinery.EXTENSION_SUFFIXES:
            kind = C_EXTENSION

        # imp.find_module only returned an open file for source/bytecode;
        # the caller is responsible for closing it.
        if kind in {PY_SOURCE, PY_COMPILED}:
            file = open(path, mode)
    else:
        path = None
        suffix = mode = ''
    return file, path, (suffix, mode, kind)
def get_frozen_object(module, paths=None):
    """Return the code object for *module* (imp.get_frozen_object stand-in).

    Raises ImportError when the module cannot be located.
    """
    spec = importlib.util.find_spec(module, paths)
    if spec is None:
        raise ImportError("Can't find %s" % module)
    return spec.loader.get_code(module)
def get_module(module, paths, info):
    """Create and return the module object for *module* from its spec.

    *info* is accepted only for imp-style signature compatibility and is
    not consulted. Raises ImportError when the module cannot be located.
    """
    spec = importlib.util.find_spec(module, paths)
    if spec is None:
        raise ImportError("Can't find %s" % module)
    return module_from_spec(spec)

Binary file not shown.

View File

@@ -0,0 +1,659 @@
from __future__ import absolute_import, unicode_literals
import io
import os
import sys
import warnings
import functools
from collections import defaultdict
from functools import partial
from functools import wraps
from importlib import import_module
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.extern.packaging.version import LegacyVersion, parse
from setuptools.extern.packaging.specifiers import SpecifierSet
from setuptools.extern.six import string_types, PY3
__metaclass__ = type  # Python 2 compatibility: make classes below new-style
def read_configuration(
        filepath, find_others=False, ignore_option_errors=False):
    """Read given configuration file and returns options from it as a dict.

    :param str|unicode filepath: Path to configuration file
        to get options from.

    :param bool find_others: Whether to search for other configuration files
        which could be in various places.

    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to exceptions
        in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.

    :rtype: dict
    """
    from setuptools.dist import Distribution, _Distribution

    filepath = os.path.abspath(filepath)
    if not os.path.isfile(filepath):
        raise DistutilsFileError(
            'Configuration file %s does not exist.' % filepath)

    # Parse from the file's own directory (presumably so relative
    # references inside the config resolve against it); always restore.
    previous_cwd = os.getcwd()
    os.chdir(os.path.dirname(filepath))

    try:
        dist = Distribution()
        if find_others:
            filenames = dist.find_config_files()
        else:
            filenames = []
        if filepath not in filenames:
            filenames.append(filepath)

        _Distribution.parse_config_files(dist, filenames=filenames)

        handlers = parse_configuration(
            dist, dist.command_options,
            ignore_option_errors=ignore_option_errors)
    finally:
        os.chdir(previous_cwd)

    return configuration_to_dict(handlers)
def _get_option(target_obj, key):
"""
Given a target object and option key, get that option from
the target object, either through a get_{key} method or
from an attribute directly.
"""
getter_name = 'get_{key}'.format(**locals())
by_attribute = functools.partial(getattr, target_obj, key)
getter = getattr(target_obj, getter_name, by_attribute)
return getter()
def configuration_to_dict(handlers):
    """Returns configuration data gathered by given handlers as a dict.

    :param list[ConfigHandler] handlers: Handlers list,
        usually from parse_configuration()

    :rtype: dict
    """
    config_dict = defaultdict(dict)

    for handler in handlers:
        section = config_dict[handler.section_prefix]
        for option in handler.set_options:
            section[option] = _get_option(handler.target_obj, option)

    return config_dict
def parse_configuration(
        distribution, command_options, ignore_option_errors=False):
    """Performs additional parsing of configuration options
    for a distribution.

    Returns a list of used option handlers.

    :param Distribution distribution:
    :param dict command_options:
    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to exceptions
        in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.
    :rtype: list
    """
    options_handler = ConfigOptionsHandler(
        distribution, command_options, ignore_option_errors)
    options_handler.parse()

    # NOTE(review): options are parsed before metadata; the metadata handler
    # receives distribution.package_dir, presumably filled in by the options
    # pass — confirm before reordering.
    meta_handler = ConfigMetadataHandler(
        distribution.metadata, command_options, ignore_option_errors,
        distribution.package_dir)
    meta_handler.parse()

    return meta_handler, options_handler
class ConfigHandler:
    """Handles metadata supplied in configuration files."""

    section_prefix = None
    """Prefix for config sections handled by this handler.
    Must be provided by subclasses.
    """

    aliases = {}
    """Options aliases.
    For compatibility with various packages. E.g.: d2to1 and pbr.
    Note: `-` in keys is replaced with `_` by config parser.
    """
def __init__(self, target_obj, options, ignore_option_errors=False):
sections = {}
section_prefix = self.section_prefix
for section_name, section_options in options.items():
if not section_name.startswith(section_prefix):
continue
section_name = section_name.replace(section_prefix, '').strip('.')
sections[section_name] = section_options
self.ignore_option_errors = ignore_option_errors
self.target_obj = target_obj
self.sections = sections
self.set_options = []
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
raise NotImplementedError(
'%s must provide .parsers property' % self.__class__.__name__)
def __setitem__(self, option_name, value):
unknown = tuple()
target_obj = self.target_obj
# Translate alias into real name.
option_name = self.aliases.get(option_name, option_name)
current_value = getattr(target_obj, option_name, unknown)
if current_value is unknown:
raise KeyError(option_name)
if current_value:
# Already inhabited. Skipping.
return
skip_option = False
parser = self.parsers.get(option_name)
if parser:
try:
value = parser(value)
except Exception:
skip_option = True
if not self.ignore_option_errors:
raise
if skip_option:
return
setter = getattr(target_obj, 'set_%s' % option_name, None)
if setter is None:
setattr(target_obj, option_name, value)
else:
setter(value)
self.set_options.append(option_name)
@classmethod
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = '='
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise DistutilsOptionError(
'Unable to parse option value to dict: %s' % value)
result[key.strip()] = val.strip()
return result
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ('1', 'true', 'yes')
@classmethod
def _exclude_files_parser(cls, key):
"""Returns a parser function to make sure field inputs
are not files.
Parses a value after getting the key so error messages are
more informative.
:param key:
:rtype: callable
"""
def parser(value):
exclude_directive = 'file:'
if value.startswith(exclude_directive):
raise ValueError(
'Only strings are accepted for the {0} field, '
'files are not accepted'.format(key))
return value
return parser
@classmethod
def _parse_file(cls, value):
"""Represents value as a string, allowing including text
from nearest files using `file:` directive.
Directive is sandboxed and won't reach anything outside
directory with setup.py.
Examples:
file: README.rst, CHANGELOG.md, src/file.txt
:param str value:
:rtype: str
"""
include_directive = 'file:'
if not isinstance(value, string_types):
return value
if not value.startswith(include_directive):
return value
spec = value[len(include_directive):]
filepaths = (os.path.abspath(path.strip()) for path in spec.split(','))
return '\n'.join(
cls._read_file(path)
for path in filepaths
if (cls._assert_local(path) or True)
and os.path.isfile(path)
)
@staticmethod
def _assert_local(filepath):
if not filepath.startswith(os.getcwd()):
raise DistutilsOptionError(
'`file:` directive can not access %s' % filepath)
@staticmethod
def _read_file(filepath):
with io.open(filepath, encoding='utf-8') as f:
return f.read()
@classmethod
def _parse_attr(cls, value, package_dir=None):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return value
attrs_path = value.replace(attr_directive, '').strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
parent_path = os.getcwd()
if package_dir:
if attrs_path[0] in package_dir:
# A custom path was specified for the module we want to import
custom_path = package_dir[attrs_path[0]]
parts = custom_path.rsplit('/', 1)
if len(parts) > 1:
parent_path = os.path.join(os.getcwd(), parts[0])
module_name = parts[1]
else:
module_name = custom_path
elif '' in package_dir:
# A custom parent directory was specified for all root modules
parent_path = os.path.join(os.getcwd(), package_dir[''])
sys.path.insert(0, parent_path)
try:
module = import_module(module_name)
value = getattr(module, attr_name)
finally:
sys.path = sys.path[1:]
return value
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to values.
:param dict section_options:
:param callable values_parser:
:rtype: dict
"""
value = {}
values_parser = values_parser or (lambda val: val)
for key, (_, val) in section_options.items():
value[key] = values_parser(val)
return value
def parse_section(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
for (name, (_, value)) in section_options.items():
try:
self[name] = value
except KeyError:
pass # Keep silent for a new option may appear anytime.
def parse(self):
"""Parses configuration file items from one
or more related sections.
"""
for section_name, section_options in self.sections.items():
method_postfix = ''
if section_name: # [section.option] variant
method_postfix = '_%s' % section_name
section_parser_method = getattr(
self,
# Dots in section names are translated into dunderscores.
('parse_section%s' % method_postfix).replace('.', '__'),
None)
if section_parser_method is None:
raise DistutilsOptionError(
'Unsupported distribution option section: [%s.%s]' % (
self.section_prefix, section_name))
section_parser_method(section_options)
def _deprecated_config_handler(self, func, msg, warning_class):
""" this function will wrap around parameters that are deprecated
:param msg: deprecation message
:param warning_class: class of warning exception to be raised
:param func: function to be wrapped around
"""
@wraps(func)
def config_handler(*args, **kwargs):
warnings.warn(msg, warning_class)
return func(*args, **kwargs)
return config_handler
class ConfigMetadataHandler(ConfigHandler):
    """Handles the [metadata] section of setup.cfg."""
    section_prefix = 'metadata'
    aliases = {
        'home_page': 'url',
        'summary': 'description',
        'classifier': 'classifiers',
        'platform': 'platforms',
    }
    strict_mode = False
    """We need to keep it loose, to be partially compatible with
    `pbr` and `d2to1` packages which also uses `metadata` section.
    """
    def __init__(self, target_obj, options, ignore_option_errors=False,
                 package_dir=None):
        # package_dir is needed so `version = attr:` can resolve modules
        # living in custom source directories.
        super(ConfigMetadataHandler, self).__init__(target_obj, options,
                                                    ignore_option_errors)
        self.package_dir = package_dir
    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        parse_file = self._parse_file
        parse_dict = self._parse_dict
        exclude_files_parser = self._exclude_files_parser
        return {
            'platforms': parse_list,
            'keywords': parse_list,
            'provides': parse_list,
            'requires': self._deprecated_config_handler(
                parse_list,
                "The requires parameter is deprecated, please use "
                "install_requires for runtime dependencies.",
                DeprecationWarning),
            'obsoletes': parse_list,
            'classifiers': self._get_parser_compound(parse_file, parse_list),
            'license': exclude_files_parser('license'),
            'license_files': parse_list,
            'description': parse_file,
            'long_description': parse_file,
            'version': self._parse_version,
            'project_urls': parse_dict,
        }
    def _parse_version(self, value):
        """Parses `version` option value.
        Supports a literal string, a `file:` include, or an `attr:`
        module-attribute reference (possibly callable or iterable).
        :param value:
        :rtype: str
        """
        version = self._parse_file(value)
        if version != value:
            # Value came from a file: validate it strictly.
            version = version.strip()
            # Be strict about versions loaded from file because it's easy to
            # accidentally include newlines and other unintended content
            # NOTE(review): `parse` / `LegacyVersion` come from a packaging
            # import earlier in this file (not visible here) — verify.
            if isinstance(parse(version), LegacyVersion):
                tmpl = (
                    'Version loaded from {value} does not '
                    'comply with PEP 440: {version}'
                )
                raise DistutilsOptionError(tmpl.format(**locals()))
            return version
        version = self._parse_attr(value, self.package_dir)
        if callable(version):
            version = version()
        if not isinstance(version, string_types):
            # Allow tuples/lists of components, or any stringifiable object.
            if hasattr(version, '__iter__'):
                version = '.'.join(map(str, version))
            else:
                version = '%s' % version
        return version
class ConfigOptionsHandler(ConfigHandler):
    """Handles the [options] section (and its sub-sections) of setup.cfg."""
    section_prefix = 'options'
    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        # Requirement specs may contain commas, so they split on ';'.
        parse_list_semicolon = partial(self._parse_list, separator=';')
        parse_bool = self._parse_bool
        parse_dict = self._parse_dict
        return {
            'zip_safe': parse_bool,
            'use_2to3': parse_bool,
            'include_package_data': parse_bool,
            'package_dir': parse_dict,
            'use_2to3_fixers': parse_list,
            'use_2to3_exclude_fixers': parse_list,
            'convert_2to3_doctests': parse_list,
            'scripts': parse_list,
            'eager_resources': parse_list,
            'dependency_links': parse_list,
            'namespace_packages': parse_list,
            'install_requires': parse_list_semicolon,
            'setup_requires': parse_list_semicolon,
            'tests_require': parse_list_semicolon,
            'packages': self._parse_packages,
            'entry_points': self._parse_file,
            # NOTE(review): SpecifierSet presumably comes from a packaging
            # import earlier in this file — verify.
            'python_requires': SpecifierSet,
        }
    def _parse_packages(self, value):
        """Parses `packages` option value.
        Supports an explicit list or the `find:` / `find_namespace:`
        directives (configured via [options.packages.find]).
        :param value:
        :rtype: list
        """
        find_directives = ['find:', 'find_namespace:']
        trimmed_value = value.strip()
        if trimmed_value not in find_directives:
            return self._parse_list(value)
        findns = trimmed_value == find_directives[1]
        if findns and not PY3:
            raise DistutilsOptionError(
                'find_namespace: directive is unsupported on Python < 3.3')
        # Read function arguments from a dedicated section.
        find_kwargs = self.parse_section_packages__find(
            self.sections.get('packages.find', {}))
        if findns:
            from setuptools import find_namespace_packages as find_packages
        else:
            from setuptools import find_packages
        return find_packages(**find_kwargs)
    def parse_section_packages__find(self, section_options):
        """Parses `packages.find` configuration file section.
        To be used in conjunction with _parse_packages().
        :param dict section_options:
        """
        section_data = self._parse_section_to_dict(
            section_options, self._parse_list)
        # Only pass through the keyword arguments find_packages() accepts.
        valid_keys = ['where', 'include', 'exclude']
        find_kwargs = dict(
            [(k, v) for k, v in section_data.items() if k in valid_keys and v])
        where = find_kwargs.get('where')
        if where is not None:
            find_kwargs['where'] = where[0]  # cast list to single val
        return find_kwargs
    def parse_section_entry_points(self, section_options):
        """Parses `entry_points` configuration file section.
        :param dict section_options:
        """
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        self['entry_points'] = parsed
    def _parse_package_data(self, section_options):
        # Shared helper: '*' in config maps to the '' (all-packages) key.
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        root = parsed.get('*')
        if root:
            parsed[''] = root
            del parsed['*']
        return parsed
    def parse_section_package_data(self, section_options):
        """Parses `package_data` configuration file section.
        :param dict section_options:
        """
        self['package_data'] = self._parse_package_data(section_options)
    def parse_section_exclude_package_data(self, section_options):
        """Parses `exclude_package_data` configuration file section.
        :param dict section_options:
        """
        self['exclude_package_data'] = self._parse_package_data(
            section_options)
    def parse_section_extras_require(self, section_options):
        """Parses `extras_require` configuration file section.
        :param dict section_options:
        """
        parse_list = partial(self._parse_list, separator=';')
        self['extras_require'] = self._parse_section_to_dict(
            section_options, parse_list)
    def parse_section_data_files(self, section_options):
        """Parses `data_files` configuration file section.
        :param dict section_options:
        """
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        self['data_files'] = [(k, v) for k, v in parsed.items()]

View File

@@ -0,0 +1,23 @@
from distutils.dep_util import newer_group
# yes, this is was almost entirely copy-pasted from
# 'newer_pairwise()', this is just another convenience
# function.
def newer_pairwise_group(sources_groups, targets):
    """Walk both arguments in parallel, testing if each source group is newer
    than its corresponding target. Returns a pair of lists (sources_groups,
    targets) where sources is newer than target, according to the semantics
    of 'newer_group()'.
    """
    if len(sources_groups) != len(targets):
        raise ValueError("'sources_group' and 'targets' must be the same length")
    # Collect only the (group, target) pairs whose sources are newer.
    stale_sources = []
    stale_targets = []
    for group, target in zip(sources_groups, targets):
        if newer_group(group, target):
            stale_sources.append(group)
            stale_targets.append(target)
    return stale_sources, stale_targets

View File

@@ -0,0 +1,176 @@
import sys
import marshal
import contextlib
from distutils.version import StrictVersion
from .py33compat import Bytecode
from .py27compat import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
from . import py27compat
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
    """A prerequisite to building or installing a distribution"""
    def __init__(
            self, name, requested_version, module, homepage='',
            attribute=None, format=None):
        # Default to StrictVersion parsing when a version was requested.
        if format is None and requested_version is not None:
            format = StrictVersion
        if format is not None:
            requested_version = format(requested_version)
        if attribute is None:
            attribute = '__version__'
        # Copy every parameter onto the instance in one shot; 'self' itself
        # ends up in locals(), so it is removed again right after.
        self.__dict__.update(locals())
        del self.self
    def full_name(self):
        """Return full package/distribution name, w/version"""
        if self.requested_version is not None:
            return '%s-%s' % (self.name, self.requested_version)
        return self.name
    def version_ok(self, version):
        """Is 'version' sufficiently up-to-date?"""
        # Note precedence: `and` binds tighter than `or`, so the comparison
        # only runs when the version string is known.
        return self.attribute is None or self.format is None or \
            str(version) != "unknown" and version >= self.requested_version
    def get_version(self, paths=None, default="unknown"):
        """Get version number of installed module, 'None', or 'default'
        Search 'paths' for module. If not found, return 'None'. If found,
        return the extracted version attribute, or 'default' if no version
        attribute was specified, or the value cannot be determined without
        importing the module. The version is formatted according to the
        requirement's version format (if any), unless it is 'None' or the
        supplied 'default'.
        """
        if self.attribute is None:
            # Mere presence of the module is enough; no attribute to read.
            try:
                f, p, i = find_module(self.module, paths)
                if f:
                    f.close()
                return default
            except ImportError:
                return None
        v = get_module_constant(self.module, self.attribute, default, paths)
        if v is not None and v is not default and self.format is not None:
            return self.format(v)
        return v
    def is_present(self, paths=None):
        """Return true if dependency is present on 'paths'"""
        return self.get_version(paths) is not None
    def is_current(self, paths=None):
        """Return true if dependency is present and up-to-date on 'paths'"""
        version = self.get_version(paths)
        if version is None:
            return False
        return self.version_ok(version)
def maybe_close(f):
    """Return a context manager closing *f* on exit.

    A falsy *f* (e.g. None) yields a no-op context manager instead.
    """
    if f:
        return contextlib.closing(f)
    @contextlib.contextmanager
    def _noop():
        yield
    return _noop()
def get_module_constant(module, symbol, default=-1, paths=None):
    """Find 'module' by searching 'paths', and extract 'symbol'
    Return 'None' if 'module' does not exist on 'paths', or it does not define
    'symbol'. If the module defines 'symbol' as a constant, return the
    constant. Otherwise, return 'default'."""
    try:
        f, path, (suffix, mode, kind) = info = find_module(module, paths)
    except ImportError:
        # Module doesn't exist
        return None
    # f may be None (e.g. frozen modules); maybe_close handles that.
    with maybe_close(f):
        if kind == PY_COMPILED:
            f.read(8)  # skip magic & date
            code = marshal.load(f)
        elif kind == PY_FROZEN:
            code = py27compat.get_frozen_object(module, paths)
        elif kind == PY_SOURCE:
            code = compile(f.read(), path, 'exec')
        else:
            # Not something we can parse; we'll have to import it. :(
            imported = py27compat.get_module(module, paths, info)
            return getattr(imported, symbol, None)
    # Inspect the code object without executing the module.
    return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
    """Extract the constant value of 'symbol' from 'code'
    If the name 'symbol' is bound to a constant value by the Python code
    object 'code', return that value. If 'symbol' is bound to an expression,
    return 'default'. Otherwise, return 'None'.
    Return value is based on the first assignment to 'symbol'. 'symbol' must
    be a global, or at least a non-"fast" local in the code block. That is,
    only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
    must be present in 'code.co_names'.
    """
    if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
        return None
    name_idx = list(code.co_names).index(symbol)
    # NOTE(review): opcode numbers are hardcoded rather than taken from
    # dis.opmap — they match historical CPython but should be verified
    # against the running interpreter version.
    STORE_NAME = 90
    STORE_GLOBAL = 97
    LOAD_CONST = 100
    const = default
    for byte_code in Bytecode(code):
        op = byte_code.opcode
        arg = byte_code.arg
        if op == LOAD_CONST:
            # Remember the most recent constant pushed on the stack.
            const = code.co_consts[arg]
        elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
            # First store into `symbol`: a constant if LOAD_CONST preceded it,
            # otherwise `default` (an expression was stored).
            return const
        else:
            const = default
    # Implicitly returns None when `symbol` is never assigned.
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()

View File

@@ -0,0 +1,16 @@
"""setuptools.errors
Provides exceptions used by setuptools modules.
"""
from distutils.errors import DistutilsError
class RemovedCommandError(DistutilsError, RuntimeError):
    """Raised when a command actively removed from setuptools is invoked.

    ``setuptools`` is built on top of ``distutils``, so merely deleting a
    command here would silently fall back to the ``distutils``
    implementation; this error makes the removal explicit for commands that
    still exist in ``distutils``.
    """

View File

@@ -0,0 +1,174 @@
"""
Filename globbing utility. Mostly a copy of `glob` from Python 3.5.
Changes include:
* `yield from` and PEP3102 `*` removed.
* Hidden files are not ignored.
"""
import os
import re
import fnmatch
__all__ = ["glob", "iglob", "escape"]
def glob(pathname, recursive=False):
    """Return a list of paths matching a pathname pattern.

    The pattern may contain simple shell-style wildcards a la fnmatch,
    but unlike fnmatch, filenames starting with a dot are NOT special-cased
    here. When *recursive* is true, '**' matches any files and zero or
    more directories and subdirectories.
    """
    matches = iglob(pathname, recursive=recursive)
    return list(matches)
def iglob(pathname, recursive=False):
    """Return an iterator yielding the paths matching a pathname pattern.

    Same matching rules as glob(); when *recursive* is true, '**' matches
    any files and zero or more directories and subdirectories.
    """
    matches = _iglob(pathname, recursive)
    if recursive and _isrecursive(pathname):
        # A bare '**' pattern first yields an empty-string sentinel
        # (see glob2); consume it before handing the iterator out.
        sentinel = next(matches)
        assert not sentinel
    return matches
def _iglob(pathname, recursive):
    # Core recursive generator behind glob()/iglob().
    dirname, basename = os.path.split(pathname)
    if not has_magic(pathname):
        # Literal path: yield it only if it exists.
        if basename:
            if os.path.lexists(pathname):
                yield pathname
        else:
            # Patterns ending with a slash should match only directories
            if os.path.isdir(dirname):
                yield pathname
        return
    if not dirname:
        # Pattern relative to the current directory.
        if recursive and _isrecursive(basename):
            for x in glob2(dirname, basename):
                yield x
        else:
            for x in glob1(dirname, basename):
                yield x
        return
    # `os.path.split()` returns the argument itself as a dirname if it is a
    # drive or UNC path. Prevent an infinite recursion if a drive or UNC path
    # contains magic characters (i.e. r'\\?\C:').
    if dirname != pathname and has_magic(dirname):
        dirs = _iglob(dirname, recursive)
    else:
        dirs = [dirname]
    # Pick the matcher for the final component: recursive, wildcard,
    # or literal existence check.
    if has_magic(basename):
        if recursive and _isrecursive(basename):
            glob_in_dir = glob2
        else:
            glob_in_dir = glob1
    else:
        glob_in_dir = glob0
    for dirname in dirs:
        for name in glob_in_dir(dirname, basename):
            yield os.path.join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob1(dirname, pattern):
    """List entries of *dirname* matching wildcard *pattern* (basenames only)."""
    if not dirname:
        # Empty dirname means the current directory; keep bytes/str in sync
        # with the pattern's type.
        dirname = os.curdir.encode('ASCII') if isinstance(pattern, bytes) else os.curdir
    try:
        entries = os.listdir(dirname)
    except OSError:
        return []
    return fnmatch.filter(entries, pattern)
def glob0(dirname, basename):
    """Match a literal *basename* inside *dirname*; return [] or [basename]."""
    if basename:
        if os.path.lexists(os.path.join(dirname, basename)):
            return [basename]
    elif os.path.isdir(dirname):
        # `os.path.split()` returns an empty basename for paths ending with
        # a directory separator. 'q*x/' should match only directories.
        return [basename]
    return []
# This helper function recursively yields relative pathnames inside a literal
# directory.
def glob2(dirname, pattern):
    """Recursively yield relative pathnames inside a literal directory.

    *pattern* must be the recursive marker ('**'); an empty string of the
    pattern's type is yielded first so the directory itself matches.
    """
    assert _isrecursive(pattern)
    yield pattern[:0]
    for relpath in _rlistdir(dirname):
        yield relpath
# Recursively yields relative pathnames inside a literal directory.
def _rlistdir(dirname):
    # Recursively yields relative pathnames inside a literal directory.
    if not dirname:
        # Empty dirname means the current directory; keep bytes/str types.
        if isinstance(dirname, bytes):
            dirname = os.curdir.encode('ASCII')
        else:
            dirname = os.curdir
    try:
        names = os.listdir(dirname)
    except os.error:
        # Unreadable / missing directory: yield nothing.
        return
    for x in names:
        yield x
        # Descend depth-first, yielding paths relative to the start dir.
        path = os.path.join(dirname, x) if dirname else x
        for y in _rlistdir(path):
            yield os.path.join(x, y)
# Wildcard characters recognized by glob patterns, for str and bytes.
magic_check = re.compile('([*?[])')
magic_check_bytes = re.compile(b'([*?[])')
def has_magic(s):
    """Return True if *s* contains any glob wildcard character (*, ?, [)."""
    pattern = magic_check_bytes if isinstance(s, bytes) else magic_check
    return pattern.search(s) is not None
def _isrecursive(pattern):
if isinstance(pattern, bytes):
return pattern == b'**'
else:
return pattern == '**'
def escape(pathname):
    """Escape all special glob characters in *pathname*.
    """
    # Wrap each of "*?[" in square brackets. Metacharacters do not work in
    # the drive part, so that part is left untouched.
    drive, tail = os.path.splitdrive(pathname)
    if isinstance(tail, bytes):
        tail = magic_check_bytes.sub(br'[\1]', tail)
    else:
        tail = magic_check.sub(r'[\1]', tail)
    return drive + tail

View File

@@ -0,0 +1,179 @@
"""
Monkey patching of distutils.
"""
import sys
import distutils.filelist
import platform
import types
import functools
from importlib import import_module
import inspect
from setuptools.extern import six
import setuptools
__all__ = []
"""
Everything is private. Contact the project team
if you think you need this functionality.
"""
def _get_mro(cls):
"""
Returns the bases classes for cls sorted by the MRO.
Works around an issue on Jython where inspect.getmro will not return all
base classes if multiple classes share the same name. Instead, this
function will return a tuple containing the class itself, and the contents
of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
"""
if platform.python_implementation() == "Jython":
return (cls,) + cls.__bases__
return inspect.getmro(cls)
def get_unpatched(item):
    """Return the unpatched original for *item* (class or function), or None."""
    if isinstance(item, six.class_types):
        return get_unpatched_class(item)
    if isinstance(item, types.FunctionType):
        return get_unpatched_function(item)
    return None
def get_unpatched_class(cls):
    """Protect against re-patching the distutils if reloaded.

    Walks the MRO to the first class not defined by setuptools and asserts
    it comes from distutils, so no other extension monkeypatched the
    distutils first.
    """
    for base in _get_mro(cls):
        if base.__module__.startswith('setuptools'):
            continue
        if not base.__module__.startswith('distutils'):
            msg = "distutils has already been patched by %r" % cls
            raise AssertionError(msg)
        return base
def patch_all():
    """Monkeypatch distutils globally with the setuptools replacements."""
    # we can't patch distutils.cmd, alas
    distutils.core.Command = setuptools.Command
    has_issue_12885 = sys.version_info <= (3, 5, 3)
    if has_issue_12885:
        # fix findall bug in distutils (http://bugs.python.org/issue12885)
        distutils.filelist.findall = setuptools.findall
    # Older interpreters ship a default upload URL that no longer works.
    needs_warehouse = (
        sys.version_info < (2, 7, 13)
        or
        (3, 4) < sys.version_info < (3, 4, 6)
        or
        (3, 5) < sys.version_info <= (3, 5, 3)
    )
    if needs_warehouse:
        warehouse = 'https://upload.pypi.org/legacy/'
        distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
    _patch_distribution_metadata()
    # Install Distribution throughout the distutils
    for module in distutils.dist, distutils.core, distutils.cmd:
        module.Distribution = setuptools.dist.Distribution
    # Install the patched Extension
    distutils.core.Extension = setuptools.extension.Extension
    distutils.extension.Extension = setuptools.extension.Extension
    if 'distutils.command.build_ext' in sys.modules:
        # Only fix up build_ext if it was already imported.
        sys.modules['distutils.command.build_ext'].Extension = (
            setuptools.extension.Extension
        )
    patch_for_msvc_specialized_compiler()
def _patch_distribution_metadata():
    """Patch write_pkg_file and read_pkg_file for higher metadata standards"""
    patched_attrs = ('write_pkg_file', 'read_pkg_file', 'get_metadata_version')
    for attr in patched_attrs:
        replacement = getattr(setuptools.dist, attr)
        setattr(distutils.dist.DistributionMetadata, attr, replacement)
def patch_func(replacement, target_mod, func_name):
    """
    Patch func_name in target_mod with replacement
    Important - original must be resolved by name to avoid
    patching an already patched function.
    """
    unpatched = getattr(target_mod, func_name)
    # Stash the original on the replacement; setdefault ensures repeated
    # patching never overwrites the true original.
    vars(replacement).setdefault('unpatched', unpatched)
    # Swap the function in the target module.
    setattr(target_mod, func_name, replacement)
def get_unpatched_function(candidate):
    """Return the original function stashed on *candidate* by patch_func."""
    return candidate.unpatched
def patch_for_msvc_specialized_compiler():
    """
    Patch functions in distutils to use standalone Microsoft Visual C++
    compilers.
    """
    # import late to avoid circular imports on Python < 3.5
    msvc = import_module('setuptools.msvc')
    if platform.system() != 'Windows':
        # Compilers only availables on Microsoft Windows
        return
    def patch_params(mod_name, func_name):
        """
        Prepare the parameters for patch_func to patch indicated function.
        """
        # Replacements live in setuptools.msvc as msvc9_<name>/msvc14_<name>.
        repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
        repl_name = repl_prefix + func_name.lstrip('_')
        repl = getattr(msvc, repl_name)
        mod = import_module(mod_name)
        if not hasattr(mod, func_name):
            raise ImportError(func_name)
        return repl, mod, func_name
    # Python 2.7 to 3.4
    msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
    # Python 3.5+
    msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
    # Each patch is best-effort: skip silently when the target function
    # does not exist in this interpreter's distutils.
    try:
        # Patch distutils.msvc9compiler
        patch_func(*msvc9('find_vcvarsall'))
        patch_func(*msvc9('query_vcvarsall'))
    except ImportError:
        pass
    try:
        # Patch distutils._msvccompiler._get_vc_env
        patch_func(*msvc14('_get_vc_env'))
    except ImportError:
        pass
    try:
        # Patch distutils._msvccompiler.gen_lib_options for Numpy
        patch_func(*msvc14('gen_lib_options'))
    except ImportError:
        pass

View File

@@ -0,0 +1,60 @@
"""
Compatibility Support for Python 2.7 and earlier
"""
import sys
import platform
from setuptools.extern import six
def get_all_headers(message, key):
    """
    Given an HTTPMessage, return all headers matching a given key.
    """
    return message.get_all(key)
# On Python 2 the HTTPMessage API differs; redefine accordingly.
if six.PY2:
    def get_all_headers(message, key):
        return message.getheaders(key)
# True only on Linux under Python 2, where shutil.rmtree needs str paths.
linux_py2_ascii = (
    platform.system() == 'Linux' and
    six.PY2
)
# Identity function elsewhere; coerces to str on affected platforms.
rmtree_safe = str if linux_py2_ascii else lambda x: x
"""Workaround for http://bugs.python.org/issue24672"""
# Prefer the package-local _imp shim; fall back to the deprecated `imp`
# module when it is unavailable.
try:
    from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
    from ._imp import get_frozen_object, get_module
except ImportError:
    import imp
    from imp import PY_COMPILED, PY_FROZEN, PY_SOURCE  # noqa
    def find_module(module, paths=None):
        """Just like 'imp.find_module()', but with package support"""
        parts = module.split('.')
        while parts:
            part = parts.pop(0)
            f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)
            if kind == imp.PKG_DIRECTORY:
                # Descend into the package; a bare package resolves to
                # its __init__ module.
                parts = parts or ['__init__']
                paths = [path]
            elif parts:
                raise ImportError("Can't find %r in %s" % (parts, module))
        return info
    def get_frozen_object(module, paths):
        # paths is accepted for interface parity but unused by imp.
        return imp.get_frozen_object(module)
    def get_module(module, paths, info):
        imp.load_module(module, *info)
        return sys.modules[module]

View File

@@ -0,0 +1,32 @@
__all__ = []
__metaclass__ = type
# Use the stdlib TemporaryDirectory when available (Python >= 3.2);
# otherwise provide a minimal backport.
try:
    # Python >=3.2
    from tempfile import TemporaryDirectory
except ImportError:
    import shutil
    import tempfile
    class TemporaryDirectory:
        """
        Very simple temporary directory context manager.
        Will try to delete afterward, but will also ignore OS and similar
        errors on deletion.
        """
        def __init__(self, **kwargs):
            self.name = None  # Handle mkdtemp raising an exception
            self.name = tempfile.mkdtemp(**kwargs)
        def __enter__(self):
            return self.name
        def __exit__(self, exctype, excvalue, exctrace):
            try:
                # ignore_errors=True suppresses per-file removal failures.
                shutil.rmtree(self.name, True)
            except OSError:  # removal errors are not the only possible
                pass
            self.name = None

View File

@@ -0,0 +1,13 @@
import importlib
# importlib.util is absent on very old interpreters; import best-effort.
try:
    import importlib.util
except ImportError:
    pass
# Provide module_from_spec everywhere: use the stdlib one when present,
# otherwise emulate it via the spec's loader.
try:
    module_from_spec = importlib.util.module_from_spec
except AttributeError:
    def module_from_spec(spec):
        return spec.loader.load_module(spec.name)

View File

@@ -0,0 +1,6 @@
# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r
__requires__ = %(spec)r
__import__('pkg_resources').require(%(spec)r)
__file__ = %(dev_path)r
with open(__file__) as f:
exec(compile(f.read(), __file__, 'exec'))

View File

@@ -0,0 +1,3 @@
# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
__requires__ = %(spec)r
__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)

View File

@@ -0,0 +1,74 @@
def __boot():
    # Bootstrap: this file masquerades as `site.py`; it locates and executes
    # the real stdlib `site` module, then re-inserts PYTHONPATH entries ahead
    # of system paths so egg installs take precedence.
    import sys
    import os
    PYTHONPATH = os.environ.get('PYTHONPATH')
    if PYTHONPATH is None or (sys.platform == 'win32' and not PYTHONPATH):
        PYTHONPATH = []
    else:
        PYTHONPATH = PYTHONPATH.split(os.pathsep)
    pic = getattr(sys, 'path_importer_cache', {})
    # Everything after the PYTHONPATH entries is the "system" path.
    stdpath = sys.path[len(PYTHONPATH):]
    mydir = os.path.dirname(__file__)
    for item in stdpath:
        if item == mydir or not item:
            continue  # skip if current dir. on Windows, or my own directory
        importer = pic.get(item)
        if importer is not None:
            loader = importer.find_module('site')
            if loader is not None:
                # This should actually reload the current module
                loader.load_module('site')
                break
        else:
            try:
                import imp  # Avoid import loop in Python 3
                stream, path, descr = imp.find_module('site', [item])
            except ImportError:
                continue
            if stream is None:
                continue
            try:
                # This should actually reload the current module
                imp.load_module('site', stream, path, descr)
            finally:
                stream.close()
            break
    else:
        raise ImportError("Couldn't find the real 'site' module")
    # NOTE(review): makepath/addsitedir are not defined in this file; they
    # presumably appear in this module's globals when the real `site` module
    # is re-executed above — verify.
    known_paths = dict([(makepath(item)[1], 1) for item in sys.path])  # 2.2 comp
    oldpos = getattr(sys, '__egginsert', 0)  # save old insertion position
    sys.__egginsert = 0  # and reset the current one
    for item in PYTHONPATH:
        addsitedir(item)
    sys.__egginsert += oldpos  # restore effective old position
    d, nd = makepath(stdpath[0])
    insert_at = None
    new_path = []
    # Rebuild sys.path, moving entries added by addsitedir to just before
    # the first system path entry.
    for item in sys.path:
        p, np = makepath(item)
        if np == nd and insert_at is None:
            # We've hit the first 'system' path entry, so added entries go here
            insert_at = len(new_path)
        if np in known_paths or insert_at is None:
            new_path.append(item)
        else:
            # new path after the insert point, back-insert it
            new_path.insert(insert_at, item)
            insert_at += 1
    sys.path[:] = new_path
if __name__ == 'site':
    __boot()
    del __boot

View File

@@ -0,0 +1,44 @@
import unicodedata
import sys
from setuptools.extern import six
# HFS Plus uses decomposed UTF-8
def decompose(path):
    """Return *path* normalized to NFD (decomposed) form, as used by HFS Plus.

    A ``str`` is normalized directly. A ``bytes`` path is round-tripped
    through UTF-8 for normalization; if it is not valid UTF-8 it is returned
    unchanged (best-effort, matching the original behavior).

    Note: the Py2-era ``six.text_type`` check was replaced by ``str`` — they
    are the same type on Python 3, so this removes the ``setuptools.extern``
    dependency without changing behavior.
    """
    if isinstance(path, str):
        return unicodedata.normalize('NFD', path)
    try:
        path = path.decode('utf-8')
        path = unicodedata.normalize('NFD', path)
        path = path.encode('utf-8')
    except UnicodeError:
        pass  # Not UTF-8: hand the raw bytes back untouched
    return path
def filesys_decode(path):
    """
    Ensure that the given path is decoded to ``str``.

    Tries the filesystem encoding first, then UTF-8. Returns None when no
    expected encoding works (fixes the "NONE" typo in the original docstring;
    the None return was already the implicit fall-through behavior).

    Note: the Py2-era ``six.text_type`` check was replaced by ``str`` — they
    are the same type on Python 3.
    """
    if isinstance(path, str):
        return path  # already decoded
    fs_enc = sys.getfilesystemencoding() or 'utf-8'
    candidates = fs_enc, 'utf-8'
    for enc in candidates:
        try:
            return path.decode(enc)
        except UnicodeDecodeError:
            continue
    # Implicit: returns None when every candidate encoding fails.
def try_encode(string, enc):
    """Encode *string* with codec *enc*; return None if it cannot be encoded."""
    encoded = None
    try:
        encoded = string.encode(enc)
    except UnicodeEncodeError:
        pass  # caller treats None as "not representable in enc"
    return encoded

1
poster/lib64 Symbolic link
View File

@@ -0,0 +1 @@
lib

3
poster/pyvenv.cfg Normal file
View File

@@ -0,0 +1,3 @@
home = /bin
include-system-site-packages = false
version = 3.8.10

View File

@@ -1,3 +1,34 @@
atproto==0.0.30
Mastodon.py==1.8.0
annotated-types==0.6.0
anyio==4.2.0
arrow==1.3.0
atproto==0.0.37
blurhash==1.1.4
certifi==2023.11.17
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
cryptography==41.0.7
decorator==5.1.1
dnspython==2.4.2
exceptiongroup==1.2.0
h11==0.14.0
httpcore==1.0.2
httpx==0.25.2
idna==3.6
libipld==1.1.0
Mastodon.py==1.8.1
oauthlib==3.2.2
pycparser==2.21
pydantic==2.5.3
pydantic-core==2.14.6
python-dateutil==2.8.2
python-magic==0.4.27
requests==2.31.0
requests-oauthlib==1.3.1
six==1.16.0
sniffio==1.3.0
tweepy==4.14.0
types-python-dateutil==2.8.19.20240106
typing-extensions==4.9.0
urllib3==2.1.0
websockets==12.0

View File

@@ -1,44 +0,0 @@
import os
# Legacy flat settings module for the Bluesky crossposter (camelCase names).
# Defaults are defined first; the block at the bottom lets environment
# variables override them at import time.
# Enables/disables crossposting to twitter and mastodon
# Accepted values: True, False
Twitter = True
Mastodon = True
# Enables/disables logging
# Accepted values: True, False
Logging = True
# Sets default posting mode. True means all posts will be crossposted unless otherwise specified,
# False means no posts will be crossposted unless explicitly specified. If no toggle (below) is specified
# postDefault will be treated as True no matter what is set.
# Accepted values: True, False
postDefault = True
# The function to select what posts are crossposted (mis)uses the language function in Bluesky.
# Enter a language here and all posts will be filtered based on if that language is included
# in the post.
# E.g. if you set postDefault to True and add German ("de") as post toggle, all posts including
# German as a language will be skipped. If postDefault is set to False, only posts including
# german will be crossposted. You can use different languages as selectors for Mastodon
# and Twitter. You can have both the actual language of the tweet, and the selector language
# added to the tweet and it will still work.
# Accepted values: Any language tag in quotes (https://en.wikipedia.org/wiki/IETF_language_tag)
mastodonLang = ""
twitterLang = ""
# Sets maximum amount of times poster will retry a failed crosspost.
maxRetries = 5
# Sets max time limit (in hours) for fetching posts. If no database exists, all posts within this time
# period will be posted.
postTimeLimit = 12
# mastodonVisibility sets what visibility should be used when posting to Mastodon. Options are "public" for always public, "unlisted" for always unlisted,
# "private" for always private and "hybrid" for all posts public except responses in threads (meaning first post in a thread is public and the rest unlisted).
mastodonVisibility = "hybrid"
# Override settings with environment variables if they exist
# Note: an unset OR empty environment variable falls through to the default
# above (the `if os.environ.get(...)` guard is truthiness-based), and boolean
# overrides compare case-insensitively against the literal string 'true'.
Twitter = os.environ.get('TWITTER_CROSSPOSTING').lower() == 'true' if os.environ.get('TWITTER_CROSSPOSTING') else Twitter
Mastodon = os.environ.get('MASTODON_CROSSPOSTING').lower() == 'true' if os.environ.get('MASTODON_CROSSPOSTING') else Mastodon
Logging = os.environ.get('LOGGING').lower() == 'true' if os.environ.get('LOGGING') else Logging
postDefault = os.environ.get('POST_DEFAULT').lower() == 'true' if os.environ.get('POST_DEFAULT') else postDefault
mastodonLang = os.environ.get('MASTODON_LANG') if os.environ.get('MASTODON_LANG') else mastodonLang
twitterLang = os.environ.get('TWITTER_LANG') if os.environ.get('TWITTER_LANG') else twitterLang
maxRetries = int(os.environ.get('MAX_RETRIES')) if os.environ.get('MAX_RETRIES') else maxRetries
postTimeLimit = int(os.environ.get('POST_TIME_LIMIT')) if os.environ.get('POST_TIME_LIMIT') else postTimeLimit
mastodonVisibility = os.environ.get('MASTODON_VISIBILITY') if os.environ.get('MASTODON_VISIBILITY') else mastodonVisibility

View File

@@ -2,10 +2,12 @@ import os
# All necessary tokens, passwords, etc.
# Your bluesky handle should include your instance, so for example handle.bsky.social if you are on the main one.
bsky_handle = ""
BSKY_HANDLE = ""
# Generate an app password in the settings on bluesky. DO NOT use your main password.
bsky_password = ""
# The mastodon instance your account is on
BSKY_PASSWORD = ""
# Your mastodon handle. Not needed for authentication, but used for making "quote posts".
MASTODON_HANDLE = ""
# The mastodon instance your account is on.
MASTODON_INSTANCE = ""
# Generate your token in the development settings on your mastodon account. Token must have the permissions to
# post statuses (write:statuses)
@@ -18,9 +20,10 @@ TWITTER_ACCESS_TOKEN = ""
TWITTER_ACCESS_TOKEN_SECRET = ""
# Override settings with environment variables if they exist
bsky_handle = os.environ.get('BSKY_HANDLE') if os.environ.get('BSKY_HANDLE') else bsky_handle
bsky_password = os.environ.get('BSKY_PASSWORD') if os.environ.get('BSKY_PASSWORD') else bsky_password
BSKY_HANDLE = os.environ.get('BSKY_HANDLE') if os.environ.get('BSKY_HANDLE') else BSKY_HANDLE
BSKY_PASSWORD = os.environ.get('BSKY_PASSWORD') if os.environ.get('BSKY_PASSWORD') else BSKY_PASSWORD
MASTODON_INSTANCE = os.environ.get('MASTODON_INSTANCE') if os.environ.get('MASTODON_INSTANCE') else MASTODON_INSTANCE
MASTODON_HANDLE = os.environ.get('MASTODON_HANDLE') if os.environ.get('MASTODON_HANDLE') else MASTODON_HANDLE
MASTODON_TOKEN = os.environ.get('MASTODON_TOKEN') if os.environ.get('MASTODON_TOKEN') else MASTODON_TOKEN
TWITTER_APP_KEY = os.environ.get('TWITTER_APP_KEY') if os.environ.get('TWITTER_APP_KEY') else TWITTER_APP_KEY
TWITTER_APP_SECRET = os.environ.get('TWITTER_APP_SECRET') if os.environ.get('TWITTER_APP_SECRET') else TWITTER_APP_SECRET

View File

@@ -1,14 +1,17 @@
# This file contains all necessary file and folder paths. Make sure to end folder paths with "/".
# basePath is the path from root to the lowest common denominator for all of the other paths.
# base_path is the path from root to the lowest common denominator for all of the other paths.
# Using an absolute path is especially important if running via cron.
basePath = "/"
base_path = "./"
# Path to the database file. If you want it somewhere other than directly in the base path you can
# either write the entire path manually, or just add the rest of the path on top of the basePath.
databasePath = basePath + "db/" + "database.json"
database_path = base_path + "db/database.json"
# Path to the cache-file, which keeps track of recent posts, allowing you to limit posts per hours and
# retweet yourself
post_cache_path = base_path + "db/post.cache"
# Path to backup of database.
backupPath = basePath + "db/" + "database.bak"
backup_path = base_path + "backup/" + "database.bak"
# Path for storing logs
logPath = basePath + "logs/"
log_path = base_path + "logs/"
# Path to folder for temporary storage of images
imagePath = basePath + "images/"
image_path = base_path + "images/"

78
settings/settings.py Normal file
View File

@@ -0,0 +1,78 @@
import os

# Settings for the Bluesky crossposter. Defaults are defined first; every
# setting can be overridden at runtime by the environment variable named in
# the override section at the bottom of this file. An unset or empty
# environment variable falls through to the default.

# Enables/disables crossposting to twitter and mastodon
# Accepted values: True, False
Twitter = True
Mastodon = True
# log_level determines what messages will be written to the log.
# "error" means only error messages will be written to the log.
# "verbose" means all messages will be written to the log.
# "none" means no messages will be written to the log (not recommended).
# Accepted values: error, verbose, none
log_level = "verbose"
# visibility sets what visibility should be used when posting to Mastodon. Options are "public" for always public, "unlisted" for always unlisted,
# "private" for always private and "hybrid" for all posts public except responses in threads (meaning first post in a thread is public and the rest unlisted).
# Accepted values: public, unlisted, private, hybrid
visibility = "hybrid"
# mentions set what is to be done with posts containing a mention of another user. Options are "ignore",
# for crossposting with no change, "skip" for skipping posts with mentions, "strip" for removing
# the starting @ of a username and "url" to replace the username with a link to their bluesky profile.
# Accepted values: ignore, skip, strip, url
mentions = "strip"
# post_default sets default posting mode. True means all posts will be crossposted unless otherwise specified,
# False means no posts will be crossposted unless explicitly specified. If no toggle (below) is specified
# post_default will be treated as True no matter what is set.
# Accepted values: True, False
post_default = True
# The function to select what posts are crossposted (mis)uses the language function in Bluesky.
# Enter a language here and all posts will be filtered based on if that language is included
# in the post.
# E.g. if you set post_default to True and add German ("de") as post toggle, all posts including
# German as a language will be skipped. If post_default is set to False, only posts including
# German will be crossposted. You can use different languages as selectors for Mastodon
# and Twitter. You can have both the actual language of the tweet, and the selector language
# added to the tweet and it will still work.
# Accepted values: Any language tag in quotes (https://en.wikipedia.org/wiki/IETF_language_tag)
mastodon_lang = ""
twitter_lang = ""
# quote_posts determines if quote reposts of other posts should be crossposted with the quoted post included as a link. If False these posts will be ignored.
# Accepted values: True, False
quote_posts = True
# max_retries sets maximum amount of times poster will retry a failed crosspost.
# Accepted values: Integers greater than 0
max_retries = 5
# post_time_limit sets max time limit (in hours) for fetching posts. If no database exists, all posts within this time
# period will be posted.
# Accepted values: Integers greater than 0
post_time_limit = 12
# max_per_hour limits the amount of posts that can be crossposted within an hour. 0 means no limit.
# Accepted values: Any integer
max_per_hour = 0
# overflow_posts determines what happens to posts that are not crossposted due to the hourly limit.
# If set to "retry" the poster will attempt to send them again when posts per hour are below the limit.
# If set to "skip" the posts will be skipped and the poster will instead continue on with new posts.
# Accepted values: retry, skip
overflow_posts = "retry"


def _env_str(name, default):
    # String override: use the env value when set and non-empty, else default.
    raw = os.environ.get(name)
    return raw if raw else default


def _env_bool(name, default):
    # Boolean override: env vars are strings, so compare (case-insensitively)
    # against the literal 'true'; unset/empty keeps the default.
    raw = os.environ.get(name)
    return raw.lower() == 'true' if raw else default


def _env_int(name, default):
    # Integer override: convert when set and non-empty, else keep the default.
    raw = os.environ.get(name)
    return int(raw) if raw else default


# Override settings with environment variables if they exist
Twitter = _env_bool('TWITTER_CROSSPOSTING', Twitter)
Mastodon = _env_bool('MASTODON_CROSSPOSTING', Mastodon)
# BUG FIX: log_level is a string setting ("error"/"verbose"/"none"); the
# previous code compared it to 'true' and silently turned it into a boolean.
log_level = _env_str('LOG_LEVEL', log_level).lower()
visibility = _env_str('MASTODON_VISIBILITY', visibility)
mentions = _env_str('MENTIONS', mentions)
post_default = _env_bool('POST_DEFAULT', post_default)
mastodon_lang = _env_str('MASTODON_LANG', mastodon_lang)
twitter_lang = _env_str('TWITTER_LANG', twitter_lang)
# BUG FIX: previously any non-empty env value (including "False") made
# quote_posts a truthy string; parse it as a boolean like the other flags.
quote_posts = _env_bool('QUOTE_POSTS', quote_posts)
max_retries = _env_int('MAX_RETRIES', max_retries)
post_time_limit = _env_int('POST_TIME_LIMIT', post_time_limit)
max_per_hour = _env_int('MAX_PER_HOUR', max_per_hour)
# BUG FIX: overflow_posts is a string setting ("retry"/"skip"); the previous
# int() conversion raised ValueError whenever OVERFLOW_POST was set. Also
# removed the stray trailing `max_per_hour = 0` / `over_flow_posts = "retry"`
# lines that unconditionally clobbered the configured values.
overflow_posts = _env_str('OVERFLOW_POST', overflow_posts)